diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg2ei16.c index 3c92b6865097d..e3a627bce6931 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg2ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m1x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2(const __bf16 *rs1, vuint16m2_t rs2, @@ -55,7 +55,7 @@ vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2(const __bf16 *rs1, vuint16m2_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2(const __bf16 *rs1, vuint16m4_t rs2, @@ -66,7 +66,7 @@ vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2(const __bf16 *rs1, vuint16m4_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, @@ -78,7 +78,7 @@ vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, @@ -90,7 +90,7 @@ vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m1x2_m( // 
CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, @@ -101,7 +101,7 @@ vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, @@ -112,7 +112,7 @@ vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg3ei16.c index ff506a20cc263..feb9dcd24a51a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg3ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf4x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m1x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3(const __bf16 *rs1, vuint16m2_t rs2, @@ -55,7 +55,7 @@ vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3(const __bf16 *rs1, vuint16m2_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf4x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, @@ -67,7 +67,7 @@ vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, @@ -79,7 +79,7 @@ vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m1x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, @@ -90,7 +90,7 @@ vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_m(vbool8_t vm, 
const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg4ei16.c index ad118d5a33f7a..f136e3665c2b4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg4ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf4x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m1x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4(const __bf16 *rs1, vuint16m2_t rs2, @@ -55,7 +55,7 @@ vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4(const __bf16 *rs1, vuint16m2_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf4x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, @@ -67,7 +67,7 @@ vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, @@ -79,7 +79,7 @@ vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m1x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, @@ -90,7 +90,7 @@ vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg5ei16.c index 931194870ad4f..84b4a800cc96b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg5ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf4x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf2x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16m1x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], 
[[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf4x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf2x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16m1x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg6ei16.c index d2ac1ef8fe0ad..01b393fb260c2 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg6ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf4x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf2x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16m1x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf4x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf2x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16m1x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg7ei16.c index d541ed377013c..8b08ce64519ea 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg7ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf4x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7(const __bf16 *rs1, @@ -22,7 +22,7 @@ 
vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf2x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16m1x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf4x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf2x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 
4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16m1x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg8ei16.c index 69de1bfd4e630..bf143bf385dbd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg8ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf4x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf2x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16m1x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf4x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf2x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16m1x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1, diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg2e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg2e16ff.c index 6c9749e61ca0b..c22a7e26a3e2b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg2e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg2e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -25,7 +25,7 @@ vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -39,7 +39,7 @@ vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m1x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -53,7 +53,7 @@ vbfloat16m1x2_t 
test_vlseg2e16ff_v_bf16m1x2(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -67,7 +67,7 @@ vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -81,7 +81,7 @@ vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -95,7 +95,7 @@ vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_m(vbool64_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -109,7 +109,7 @@ vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_m(vbool32_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m1x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -123,7 +123,7 @@ vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -137,7 +137,7 @@ vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg3e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg3e16ff.c index 78085cbcf0b9c..77b4ccafa4d83 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg3e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg3e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf4x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -25,7 +25,7 @@ vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -39,7 +39,7 @@ vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m1x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -53,7 +53,7 @@ vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -67,7 +67,7 @@ vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf4x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -81,7 +81,7 @@ vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_m(vbool64_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -95,7 +95,7 @@ vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_m(vbool32_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m1x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -109,7 +109,7 @@ vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg4e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg4e16ff.c index d194cebd20f5a..cc395b1124e68 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg4e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg4e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf4x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -25,7 +25,7 @@ vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -39,7 +39,7 @@ vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m1x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -53,7 +53,7 @@ vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -67,7 +67,7 @@ vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf4x4_m( // 
CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -81,7 +81,7 @@ vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_m(vbool64_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -95,7 +95,7 @@ vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_m(vbool32_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m1x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -109,7 +109,7 @@ vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg5e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg5e16ff.c index 6be747289efb2..2295b9f86f15c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg5e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg5e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf4x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -25,7 +25,7 @@ vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf2x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -39,7 +39,7 @@ vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16m1x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", 
, 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -53,7 +53,7 @@ vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf4x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -67,7 +67,7 @@ vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_m(vbool64_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf2x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -81,7 +81,7 @@ vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_m(vbool32_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16m1x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } 
@llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg6e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg6e16ff.c index 469a116ace864..9a92c904781cd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg6e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg6e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf4x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -25,7 +25,7 @@ vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf2x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -39,7 +39,7 @@ vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16m1x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -53,7 +53,7 @@ vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf4x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -67,7 +67,7 @@ vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_m(vbool64_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf2x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -81,7 +81,7 @@ vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_m(vbool32_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16m1x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg7e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg7e16ff.c index 903f9fe057d32..84dfb9378ac2e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg7e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg7e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf4x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -25,7 +25,7 @@ vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf2x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -39,7 +39,7 @@ vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16m1x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -53,7 +53,7 @@ vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf4x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -67,7 +67,7 @@ vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_m(vbool64_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf2x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -81,7 +81,7 @@ vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_m(vbool32_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16m1x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg8e16ff.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg8e16ff.c index 70e5ce95ba0c7..49b4cd08ea38d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg8e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vlseg8e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf4x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -25,7 +25,7 @@ vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf2x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -39,7 +39,7 @@ vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16m1x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -53,7 +53,7 @@ vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8(const __bf16 *rs1, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf4x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -67,7 +67,7 @@ vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_m(vbool64_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf2x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -81,7 +81,7 @@ vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_m(vbool32_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16m1x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg2ei16.c index 6d408c1df6864..f525c20a0610f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg2ei16.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg2ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m1x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) 
[[TMP0]] // vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2(const __bf16 *rs1, vuint16m2_t rs2, @@ -55,7 +55,7 @@ vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2(const __bf16 *rs1, vuint16m2_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2(const __bf16 *rs1, vuint16m4_t rs2, @@ -66,7 +66,7 @@ vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2(const __bf16 *rs1, vuint16m4_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, @@ -78,7 +78,7 @@ vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, @@ -90,7 +90,7 @@ vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m1x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, @@ -101,7 +101,7 @@ vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, @@ -112,7 +112,7 @@ vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg3ei16.c index 0cf8ae8a74e47..0713bc159f4e0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg3ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf4x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m1x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3(const __bf16 *rs1, vuint16m2_t rs2, @@ -55,7 +55,7 @@ vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3(const __bf16 *rs1, vuint16m2_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf4x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, @@ -67,7 +67,7 @@ vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, @@ -79,7 +79,7 @@ vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m1x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, @@ -90,7 +90,7 @@ vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg4ei16.c index 2a3d4262567b3..96964f51031e6 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg4ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf4x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m1x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) 
poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4(const __bf16 *rs1, vuint16m2_t rs2, @@ -55,7 +55,7 @@ vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4(const __bf16 *rs1, vuint16m2_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf4x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, @@ -67,7 +67,7 @@ vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, @@ -79,7 +79,7 @@ vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m1x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, @@ -90,7 +90,7 @@ vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg5ei16.c index e6d3b53b26165..cd980f2787746 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg5ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf4x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf2x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16m1x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf4x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf2x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16m1x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg6ei16.c index 8e666defb287b..9ef9cf46e6d64 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg6ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf4x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf2x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16m1x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf4x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, @@ -56,7 +56,7 @@ 
vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf2x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16m1x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg7ei16.c index 4eec8dd2e08d9..dd848fea47922 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg7ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf4x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf2x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16m1x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf4x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf2x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vluxseg7ei16_v_bf16m1x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg8ei16.c index a5f0a59097564..13c4b05b2625a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vluxseg8ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf4x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf2x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16m1x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf4x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf2x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16m1x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg2ei16.c index 46fac12e6d7e3..6b16ed734f3f3 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg2ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16mf4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16mf4x2(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsoxseg2ei16_v_bf16mf4x2(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16mf2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16mf2x2(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsoxseg2ei16_v_bf16mf2x2(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16m1x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16m1x2(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsoxseg2ei16_v_bf16m1x2(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16m2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16m2x2(__bf16 *rs1, vuint16m2_t vs2, @@ -55,7 +55,7 @@ void test_vsoxseg2ei16_v_bf16m2x2(__bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16m4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16m4x2(__bf16 *rs1, vuint16m4_t vs2, @@ -66,7 +66,7 @@ void test_vsoxseg2ei16_v_bf16m4x2(__bf16 *rs1, vuint16m4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16mf4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, @@ -78,7 +78,7 @@ void test_vsoxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16mf2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, @@ -90,7 +90,7 @@ void test_vsoxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16m1x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, @@ -101,7 +101,7 @@ void test_vsoxseg2ei16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16m2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], 
[[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, @@ -112,7 +112,7 @@ void test_vsoxseg2ei16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16m4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, vuint16m4_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg3ei16.c index 54cfa7aa666ca..f6d0abab5dbb9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg3ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16mf4x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16mf4x3(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsoxseg3ei16_v_bf16mf4x3(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16mf2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16mf2x3(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsoxseg3ei16_v_bf16mf2x3(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16m1x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16m1x3(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsoxseg3ei16_v_bf16m1x3(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16m2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16m2x3(__bf16 *rs1, vuint16m2_t vs2, @@ -55,7 +55,7 @@ void test_vsoxseg3ei16_v_bf16m2x3(__bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16mf4x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, @@ -67,7 +67,7 @@ void test_vsoxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16mf2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, @@ -79,7 +79,7 @@ void test_vsoxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16m1x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr 
[[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, @@ -90,7 +90,7 @@ void test_vsoxseg3ei16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16m2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg4ei16.c index 76fb573f661a5..bbfa3da5fd604 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg4ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16mf4x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16mf4x4(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsoxseg4ei16_v_bf16mf4x4(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16mf2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16mf2x4(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsoxseg4ei16_v_bf16mf2x4(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16m1x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16m1x4(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsoxseg4ei16_v_bf16m1x4(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16m2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16m2x4(__bf16 *rs1, vuint16m2_t vs2, @@ -55,7 +55,7 @@ void test_vsoxseg4ei16_v_bf16m2x4(__bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16mf4x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, @@ -67,7 +67,7 @@ void test_vsoxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16mf2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, @@ -79,7 +79,7 @@ void test_vsoxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16m1x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, @@ -90,7 +90,7 @@ 
void test_vsoxseg4ei16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16m2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg5ei16.c index 7a24b054e2b1c..5061bbdcfab52 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg5ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_bf16mf4x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_bf16mf4x5(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsoxseg5ei16_v_bf16mf4x5(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_bf16mf2x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_bf16mf2x5(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsoxseg5ei16_v_bf16mf2x5(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_bf16m1x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // 
void test_vsoxseg5ei16_v_bf16m1x5(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsoxseg5ei16_v_bf16m1x5(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_bf16mf4x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsoxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_bf16mf2x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsoxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_bf16m1x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg6ei16.c index e382a85b15527..4557b91d5b680 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg6ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_bf16mf4x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_bf16mf4x6(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsoxseg6ei16_v_bf16mf4x6(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_bf16mf2x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_bf16mf2x6(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsoxseg6ei16_v_bf16mf2x6(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_bf16m1x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_bf16m1x6(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsoxseg6ei16_v_bf16m1x6(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_bf16mf4x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsoxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_bf16mf2x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsoxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_bf16m1x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg7ei16.c index 92258de5fd354..b5cbcce6341fe 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg7ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_bf16mf4x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_bf16mf4x7(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsoxseg7ei16_v_bf16mf4x7(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_bf16mf2x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_bf16mf2x7(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsoxseg7ei16_v_bf16mf2x7(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_bf16m1x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_bf16m1x7(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void 
test_vsoxseg7ei16_v_bf16m1x7(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_bf16mf4x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsoxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_bf16mf2x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsoxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_bf16m1x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg8ei16.c index 96c3995c370cf..686da3f59edf2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsoxseg8ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_bf16mf4x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) 
[[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_bf16mf4x8(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsoxseg8ei16_v_bf16mf4x8(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_bf16mf2x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_bf16mf2x8(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsoxseg8ei16_v_bf16mf2x8(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_bf16m1x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_bf16m1x8(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsoxseg8ei16_v_bf16m1x8(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_bf16mf4x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsoxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_bf16mf2x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsoxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_bf16m1x8_m( // CHECK-RV64-SAME: 
[[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg2ei16.c index c2cf18ed9de7d..0265d1de9c112 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg2ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16mf4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16mf4x2(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsuxseg2ei16_v_bf16mf4x2(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16mf2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16mf2x2(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsuxseg2ei16_v_bf16mf2x2(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16m1x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16m1x2(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsuxseg2ei16_v_bf16m1x2(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg2ei16_v_bf16m2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16m2x2(__bf16 *rs1, vuint16m2_t vs2, @@ -55,7 +55,7 @@ void test_vsuxseg2ei16_v_bf16m2x2(__bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16m4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16m4x2(__bf16 *rs1, vuint16m4_t vs2, @@ -66,7 +66,7 @@ void test_vsuxseg2ei16_v_bf16m4x2(__bf16 *rs1, vuint16m4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16mf4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, @@ -78,7 +78,7 @@ void test_vsuxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16mf2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, @@ -90,7 +90,7 @@ void test_vsuxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16m1x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, @@ -101,7 +101,7 @@ void test_vsuxseg2ei16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16m2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, @@ -112,7 +112,7 @@ void test_vsuxseg2ei16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16m4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, vuint16m4_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg3ei16.c index 5cce8486d4aa1..11ac764b0b63f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg3ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16mf4x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16mf4x3(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsuxseg3ei16_v_bf16mf4x3(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg3ei16_v_bf16mf2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16mf2x3(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsuxseg3ei16_v_bf16mf2x3(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16m1x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16m1x3(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsuxseg3ei16_v_bf16m1x3(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16m2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16m2x3(__bf16 *rs1, vuint16m2_t vs2, @@ -55,7 +55,7 @@ void test_vsuxseg3ei16_v_bf16m2x3(__bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16mf4x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, @@ -67,7 +67,7 @@ void test_vsuxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16mf2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], 
[[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, @@ -79,7 +79,7 @@ void test_vsuxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16m1x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, @@ -90,7 +90,7 @@ void test_vsuxseg3ei16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16m2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg4ei16.c index 5b8c6e82ca324..951f4964f7180 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg4ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16mf4x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16mf4x4(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsuxseg4ei16_v_bf16mf4x4(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16mf2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16mf2x4(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsuxseg4ei16_v_bf16mf2x4(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16m1x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16m1x4(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsuxseg4ei16_v_bf16m1x4(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16m2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16m2x4(__bf16 *rs1, vuint16m2_t vs2, @@ -55,7 +55,7 @@ void test_vsuxseg4ei16_v_bf16m2x4(__bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16mf4x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, @@ -67,7 +67,7 @@ void test_vsuxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16mf2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], 
ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, @@ -79,7 +79,7 @@ void test_vsuxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16m1x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, @@ -90,7 +90,7 @@ void test_vsuxseg4ei16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16m2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg5ei16.c index 9095d946fd858..ecf92d16a355b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg5ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_bf16mf4x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_bf16mf4x5(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsuxseg5ei16_v_bf16mf4x5(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_bf16mf2x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_bf16mf2x5(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsuxseg5ei16_v_bf16mf2x5(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_bf16m1x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_bf16m1x5(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsuxseg5ei16_v_bf16m1x5(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_bf16mf4x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsuxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_bf16mf2x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsuxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_bf16m1x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_bf16m1x5_m(vbool16_t vm, 
__bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg6ei16.c index 5119250adb9e1..c1d0c90907167 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg6ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_bf16mf4x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_bf16mf4x6(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsuxseg6ei16_v_bf16mf4x6(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_bf16mf2x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_bf16mf2x6(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsuxseg6ei16_v_bf16mf2x6(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_bf16m1x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_bf16m1x6(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsuxseg6ei16_v_bf16m1x6(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_bf16mf4x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: 
ret void // void test_vsuxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsuxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_bf16mf2x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsuxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_bf16m1x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg7ei16.c index 056fa42d3dd22..cc30b4e72fedd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg7ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_bf16mf4x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_bf16mf4x7(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsuxseg7ei16_v_bf16mf4x7(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_bf16mf2x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_bf16mf2x7(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsuxseg7ei16_v_bf16mf2x7(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_bf16m1x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_bf16m1x7(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsuxseg7ei16_v_bf16m1x7(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_bf16mf4x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsuxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_bf16mf2x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsuxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_bf16m1x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg8ei16.c index eab39f75ba8e7..98a01c286cdab 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsuxseg8ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_bf16mf4x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_bf16mf4x8(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsuxseg8ei16_v_bf16mf4x8(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_bf16mf2x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_bf16mf2x8(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsuxseg8ei16_v_bf16mf2x8(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_bf16m1x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_bf16m1x8(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsuxseg8ei16_v_bf16m1x8(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_bf16mf4x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsuxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_bf16mf2x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsuxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_bf16m1x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei16.c index 8253b461af7df..6ddc795e0bb95 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2(const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2(const _Float16 *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2(const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2(const _Float16 *base, vuint16m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
2) @test_vloxseg2ei16_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2(const float *base, vuint16m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2(const float *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t 
test_vloxseg2ei16_v_f32m4x2(const float *base, vuint16m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2(const float *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2(const double *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2(const double *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2(const double *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei16_v_i8m1x2(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vloxseg2ei16_v_i8m1x2(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei16_v_i8m2x2(const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vloxseg2ei16_v_i8m2x2(const int8_t *base, vuint16m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei16_v_i8m4x2(const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vloxseg2ei16_v_i8m4x2(const int8_t *base, vuint16m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei16_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei16_v_i16m1x2(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vloxseg2ei16_v_i16m1x2(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei16_v_i16m2x2(const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vloxseg2ei16_v_i16m2x2(const int16_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei16_v_i16m4x2(const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vloxseg2ei16_v_i16m4x2(const int16_t *base, vuint16m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t 
test_vloxseg2ei16_v_i32mf2x2(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei16_v_i32m1x2(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vloxseg2ei16_v_i32m1x2(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei16_v_i32m2x2(const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vloxseg2ei16_v_i32m2x2(const int32_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei16_v_i32m4x2(const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vloxseg2ei16_v_i32m4x2(const int32_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei16_v_i64m1x2(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vloxseg2ei16_v_i64m1x2(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei16_v_i64m2x2(const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vloxseg2ei16_v_i64m2x2(const int64_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei16_v_i64m4x2(const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vloxseg2ei16_v_i64m4x2(const int64_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2(const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2(const uint8_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei16_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2(const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2(const uint8_t *base, vuint16m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t 
test_vloxseg2ei16_v_u16m1x2(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2(const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2(const uint16_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2(const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2(const uint16_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2(const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2(const uint32_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2(const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2(const uint32_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2(const uint64_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2(const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2(const uint64_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_m(vbool32_t 
mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_m(vbool4_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_m(vbool8_t mask, const float *base, vuint16m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_m(vbool8_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_m(vbool16_t mask, const double *base, vuint16m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei16_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ 
-680,7 +680,7 @@ vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_m(vbool4_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_m(vbool8_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_m(vbool16_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_m(vbool2_t mask, 
const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, 
i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -960,7 +960,7 @@ vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_m(vbool32_t mask, const uint64_t *base
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m4x2_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei32.c
index b43b6266e2b3e..a4f20db865bbf 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei32.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf4x2
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2(const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2(const _Float16 *base, vuint32mf2_t
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf2x2
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2(const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2(const _Float16 *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2(const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2(const _Float16 *base, vuint32m8_t bin // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2(const float *base, vuint32m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2(const float *base, vuint32m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2(const float *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // 
vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2(const float *base, vuint32m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2(const float *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2(const double *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2(const double *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2(const double *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2(const double *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei32_v_i8m1x2(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vloxseg2ei32_v_i8m1x2(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei32_v_i8m2x2(const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vloxseg2ei32_v_i8m2x2(const int8_t *base, vuint32m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei32_v_i16m1x2(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16m1x2_t test_vloxseg2ei32_v_i16m1x2(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei32_v_i16m2x2(const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m2x2_t test_vloxseg2ei32_v_i16m2x2(const int16_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei32_v_i16m4x2(const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m4x2_t test_vloxseg2ei32_v_i16m4x2(const int16_t *base, vuint32m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // 
vint32m1x2_t test_vloxseg2ei32_v_i32m1x2(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32m1x2_t test_vloxseg2ei32_v_i32m1x2(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei32_v_i32m2x2(const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m2x2_t test_vloxseg2ei32_v_i32m2x2(const int32_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei32_v_i32m4x2(const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m4x2_t test_vloxseg2ei32_v_i32m4x2(const int32_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei32_v_i64m1x2(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint64m1x2_t test_vloxseg2ei32_v_i64m1x2(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei32_v_i64m2x2(const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m2x2_t test_vloxseg2ei32_v_i64m2x2(const int64_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei32_v_i64m4x2(const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m4x2_t test_vloxseg2ei32_v_i64m4x2(const int64_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2(const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2(const uint8_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2(const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2(const uint16_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) 
[[TMP0]] // vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2(const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2(const uint16_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2(const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2(const uint32_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2(const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2(const uint32_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2(const uint64_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2(const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2(const uint64_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_m(vbool4_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat32m1x2_t 
test_vloxseg2ei32_v_f32m1x2_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_m(vbool8_t mask, const float *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_m(vbool8_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_m(vbool16_t mask, const double *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_m(vbool4_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_m(vbool32_t 
mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_m(vbool16_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint32m1x2_t 
test_vloxseg2ei32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_m(vbool32_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei64.c index 7d7b13a8b0394..cb708667967f1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2(const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2(const _Float16 *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2(const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2(const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2(const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2(const float *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2(const float *base, vuint64m8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2(const float *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2(const double *base, vuint64m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2(const double *base, vuint64m1_t binde // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2(const double *base, vuint64m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2(const double *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2(const double *base, vuint64m4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2(const double *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei64_v_i8m1x2(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8m1x2_t test_vloxseg2ei64_v_i8m1x2(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei64_v_i16m1x2(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m1x2_t test_vloxseg2ei64_v_i16m1x2(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei64_v_i16m2x2(const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16m2x2_t test_vloxseg2ei64_v_i16m2x2(const int16_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei64_v_i32m1x2(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m1x2_t test_vloxseg2ei64_v_i32m1x2(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei64_v_i32m2x2(const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint32m2x2_t test_vloxseg2ei64_v_i32m2x2(const int32_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei64_v_i32m4x2(const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32m4x2_t test_vloxseg2ei64_v_i32m4x2(const int32_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei64_v_i64m1x2(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint64m1x2_t test_vloxseg2ei64_v_i64m1x2(const int64_t *base, vuint64m1_t bindex // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei64_v_i64m2x2(const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint64m2x2_t test_vloxseg2ei64_v_i64m2x2(const int64_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei64_v_i64m4x2(const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint64m4x2_t test_vloxseg2ei64_v_i64m4x2(const int64_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2(const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2(const uint16_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2(const uint32_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2(const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2(const uint32_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2(const uint64_t *base, vuint64m1_t 
bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2(const uint64_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2(const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2(const uint64_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat32mf2x2_t 
test_vloxseg2ei64_v_f32mf2x2_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_m(vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_m(vbool8_t mask, const float *base, vuint64m8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_m(vbool8_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_m(vbool16_t mask, const double *base, vuint64m4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_m(vbool16_t 
mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_m(vbool8_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_m(vbool16_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint64m1x2_t 
test_vloxseg2ei64_v_u64m1x2_m(vbool64_t mask, const uint64_t *base
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m2x2_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
@@ -820,7 +820,7 @@ vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_m(vbool32_t mask, const uint64_t *base
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m4x2_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei8.c
index 84d421b8a3181..ee1074d713b55 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei8.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf4x2
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2(const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2(const _Float16 *base, vuint8mf8_t bi
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2)
@test_vloxseg2ei8_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2(const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2(const _Float16 *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2(const 
_Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2(const _Float16 *base, vuint8m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2(const float *base, vuint8mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2(const float *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2(const float *base, vuint8m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2(const float *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2(const double *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2(const double *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2(const double *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2(const double *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei8_v_i8m1x2(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vloxseg2ei8_v_i8m1x2(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m2x2 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei8_v_i8m2x2(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vloxseg2ei8_v_i8m2x2(const int8_t *base, vuint8m2_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei8_v_i8m4x2(const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vloxseg2ei8_v_i8m4x2(const int8_t *base, vuint8m4_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2(const int16_t *base, vuint8mf4_t bindex, size_t 
vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei8_v_i16m1x2(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vloxseg2ei8_v_i16m1x2(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei8_v_i16m2x2(const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vloxseg2ei8_v_i16m2x2(const int16_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei8_v_i16m4x2(const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vloxseg2ei8_v_i16m4x2(const int16_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) 
poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei8_v_i32m1x2(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vloxseg2ei8_v_i32m1x2(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei8_v_i32m2x2(const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vloxseg2ei8_v_i32m2x2(const int32_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei8_v_i32m4x2(const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vloxseg2ei8_v_i32m4x2(const int32_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei8_v_i64m1x2(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vloxseg2ei8_v_i64m1x2(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei8_v_i64m2x2(const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vloxseg2ei8_v_i64m2x2(const int64_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei8_v_i64m4x2(const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vloxseg2ei8_v_i64m4x2(const int64_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2(const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2(const uint8_t *base, vuint8m2_t 
bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2(const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2(const uint8_t *base, vuint8m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2(const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2(const uint16_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2(const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2(const uint16_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2(const uint32_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2(const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2(const uint32_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2(const uint64_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2(const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2(const uint64_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t 
test_vloxseg2ei8_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_m(vbool16_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_m(vbool8_t mask, const float *base, vuint8m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_m(vbool8_t mask, const float *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_m(vbool16_t mask, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei8_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t 
test_vloxseg2ei8_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_m(vbool16_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_m(vbool32_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_m(vbool16_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_m(vbool2_t mask, const uint8_t 
*base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_m(vbool32_t 
mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei16.c index fc6e0cd13a5d4..96b7a04ccdbb9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3(const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3(const _Float16 *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3(const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3(const float *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3(const double 
*base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: 
ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei16_v_i8m1x3(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei16_v_i8m1x3(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei16_v_i8m2x3(const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vloxseg3ei16_v_i8m2x3(const int8_t *base, vuint16m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei16_v_i16m1x3(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vloxseg3ei16_v_i16m1x3(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei16_v_i16m2x3(const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vloxseg3ei16_v_i16m2x3(const int16_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei16_v_i32m1x3(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vloxseg3ei16_v_i32m1x3(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei16_v_i32m2x3(const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vloxseg3ei16_v_i32m2x3(const int32_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei16_v_i64m1x3(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vloxseg3ei16_v_i64m1x3(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei16_v_i64m2x3(const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vloxseg3ei16_v_i64m2x3(const int64_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3(const uint8_t *base, vuint16mf4_t 
bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3(const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3(const uint8_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3(const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3(const uint16_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3(const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3(const uint32_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3(const uint64_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) 
[[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_m(vbool16_t mask, 
const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t 
test_vloxseg3ei16_v_u16mf4x3_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 
6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei32.c index 8b591fd5de4d7..907f13b9533f1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t 
test_vloxseg3ei32_v_f16mf2x3(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3(const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3(const _Float16 *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3(const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3(const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3(const float *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3(const double *base, vuint32m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3(const double *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei32_v_i8m1x3(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei32_v_i8m1x3(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei32_v_i8m2x3(const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vloxseg3ei32_v_i8m2x3(const int8_t *base, vuint32m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei32_v_i16m1x3(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vloxseg3ei32_v_i16m1x3(const int16_t 
*base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei32_v_i16m2x3(const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vloxseg3ei32_v_i16m2x3(const int16_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei32_v_i32m1x3(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vloxseg3ei32_v_i32m1x3(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei32_v_i32m2x3(const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vloxseg3ei32_v_i32m2x3(const int32_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei32_v_i64m1x3(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vloxseg3ei32_v_i64m1x3(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei32_v_i64m2x3(const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vloxseg3ei32_v_i64m2x3(const int64_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3(const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3(const uint8_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3(const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3(const uint16_t *base, 
vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3(const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3(const uint32_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3(const uint64_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 
3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei32_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ 
-540,7 +540,7 @@ vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t 
test_vloxseg3ei32_v_u32mf2x3_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei64.c index 084ad1bf25588..2e7eb28f95b48 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3(const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3(const _Float16 *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3(const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3(const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3(const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3(const float *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3(const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3(const double *base, vuint64m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3(const double *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei64_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei64_v_i8m1x3(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei64_v_i8m1x3(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3(const 
int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei64_v_i16m1x3(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x3_t test_vloxseg3ei64_v_i16m1x3(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei64_v_i16m2x3(const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x3_t test_vloxseg3ei64_v_i16m2x3(const int16_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei64_v_i32m1x3(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32m1x3_t test_vloxseg3ei64_v_i32m1x3(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei64_v_i32m2x3(const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x3_t test_vloxseg3ei64_v_i32m2x3(const int32_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei64_v_i64m1x3(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x3_t test_vloxseg3ei64_v_i64m1x3(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei64_v_i64m2x3(const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x3_t test_vloxseg3ei64_v_i64m2x3(const int64_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei64_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t 
test_vloxseg3ei64_v_u16m1x3(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3(const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3(const uint16_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3(const uint32_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3(const uint64_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_m(vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_m(vbool64_t 
mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_m(vbool16_t 
mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei8.c index 7b9f719808d2f..fa14ae29e1aa1 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3(const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3(const _Float16 *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3(const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3(const float *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3(const double *base, vuint8mf4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3(const double *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei8_v_i8m1x3(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei8_v_i8m1x3(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei8_v_i8m2x3(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vloxseg3ei8_v_i8m2x3(const int8_t *base, vuint8m2_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t 
test_vloxseg3ei8_v_i16mf4x3(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei8_v_i16m1x3(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vloxseg3ei8_v_i16m1x3(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei8_v_i16m2x3(const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vloxseg3ei8_v_i16m2x3(const int16_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei8_v_i32m1x3(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vloxseg3ei8_v_i32m1x3(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei8_v_i32m2x3(const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vloxseg3ei8_v_i32m2x3(const int32_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei8_v_i64m1x3(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vloxseg3ei8_v_i64m1x3(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei8_v_i64m2x3(const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vloxseg3ei8_v_i64m2x3(const int64_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei8_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3(const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3(const uint8_t *base, vuint8m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3(const uint16_t 
*base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3(const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3(const uint16_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3(const uint32_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3(const uint64_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_m(vbool16_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_m(vbool16_t mask, const int32_t *base, 
vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_m(vbool16_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_m(vbool32_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei16.c index 40bfd8ed979d6..dc480a09f9932 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4(const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4(const _Float16 *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) 
poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4(const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4(const float *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4(const double *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t 
test_vloxseg4ei16_v_i8mf2x4(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei16_v_i8m1x4(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei16_v_i8m1x4(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei16_v_i8m2x4(const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vloxseg4ei16_v_i8m2x4(const int8_t *base, vuint16m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei16_v_i16m1x4(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vloxseg4ei16_v_i16m1x4(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei16_v_i16m2x4(const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vloxseg4ei16_v_i16m2x4(const int16_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei16_v_i32m1x4(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vloxseg4ei16_v_i32m1x4(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei16_v_i32m2x4(const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vloxseg4ei16_v_i32m2x4(const int32_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei16_v_i64m1x4(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vloxseg4ei16_v_i64m1x4(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei16_v_i64m2x4(const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vloxseg4ei16_v_i64m2x4(const int64_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t 
test_vloxseg4ei16_v_u8m1x4(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4(const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4(const uint8_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4(const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4(const uint16_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) 
poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4(const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4(const uint32_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4(const uint64_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", 
, 4) @test_vloxseg4ei16_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) 
{ @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_m(vbool32_t 
mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, 
i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei32.c index 2fdce827defcf..4f9533c04c4b4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4(const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4(const _Float16 *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4(const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4(const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4(const float *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // 
vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4(const double *base, vuint32m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4(const double *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei32_v_i8m1x4(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei32_v_i8m1x4(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei32_v_i8m2x4(const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vloxseg4ei32_v_i8m2x4(const int8_t *base, vuint32m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei32_v_i16m1x4(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vloxseg4ei32_v_i16m1x4(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei32_v_i16m2x4(const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vloxseg4ei32_v_i16m2x4(const int16_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei32_v_i32m1x4(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vloxseg4ei32_v_i32m1x4(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei32_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei32_v_i32m2x4(const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vloxseg4ei32_v_i32m2x4(const int32_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei32_v_i64m1x4(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vloxseg4ei32_v_i64m1x4(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei32_v_i64m2x4(const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vloxseg4ei32_v_i64m2x4(const int64_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t 
test_vloxseg4ei32_v_u8mf8x4(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4(const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4(const uint8_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4(const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4(const uint16_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4(const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4(const uint32_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4(const uint64_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t 
test_vloxseg4ei32_v_f32mf2x4_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_m(vbool64_t 
mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei64.c index 456989831fa0d..02135ec634044 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4(const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4(const _Float16 *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4(const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4(const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4(const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4(const float *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4(const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4(const double *base, vuint64m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4(const double *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei64_v_i8m1x4(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei64_v_i8m1x4(const int8_t *base, vuint64m8_t bindex, s // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei64_v_i16m1x4(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x4_t test_vloxseg4ei64_v_i16m1x4(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei64_v_i16m2x4(const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x4_t test_vloxseg4ei64_v_i16m2x4(const int16_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei64_v_i32m1x4(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32m1x4_t test_vloxseg4ei64_v_i32m1x4(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei64_v_i32m2x4(const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x4_t test_vloxseg4ei64_v_i32m2x4(const int32_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei64_v_i64m1x4(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x4_t test_vloxseg4ei64_v_i64m1x4(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei64_v_i64m2x4(const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x4_t test_vloxseg4ei64_v_i64m2x4(const int64_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4(const uint16_t *base, vuint64m2_t bi // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4(const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4(const uint16_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4(const uint32_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4(const uint64_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_m(vbool8_t mask, const 
_Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_m(vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf2x4_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x4_t 
test_vloxseg4ei64_v_i64m1x4_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei8.c index 31f49fda66ae3..c1ad7aa3f25a4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4(const _Float16 *base, vuint8mf2_t bindex, 
size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4(const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4(const _Float16 *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 
4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4(const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4(const float *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4(const double *base, vuint8mf4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4(const double *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei8_v_i8m1x4(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei8_v_i8m1x4(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei8_v_i8m2x4(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vloxseg4ei8_v_i8m2x4(const int8_t *base, vuint8m2_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei8_v_i16m1x4(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vloxseg4ei8_v_i16m1x4(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei8_v_i16m2x4(const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vloxseg4ei8_v_i16m2x4(const int16_t *base, vuint8m1_t 
bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei8_v_i32m1x4(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vloxseg4ei8_v_i32m1x4(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei8_v_i32m2x4(const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vloxseg4ei8_v_i32m2x4(const int32_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei8_v_i64m1x4(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vloxseg4ei8_v_i64m1x4(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei8_v_i64m2x4(const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vloxseg4ei8_v_i64m2x4(const int64_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4(const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4(const uint8_t *base, vuint8m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4(const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4(const uint16_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4(const uint32_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t 
test_vloxseg4ei8_v_u64m2x4(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4(const uint64_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_m(vbool16_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf4x4_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_m(vbool16_t mask, const 
int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_m(vbool16_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_m(vbool32_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_m(vbool64_t mask, const 
uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei16.c index 0edd52db99e4c..0b0c8a606dd98 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5(const 
float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei16_v_i8m1x5(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei16_v_i8m1x5(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei16_v_i16m1x5(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei16_v_i16m1x5(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei16_v_i32m1x5(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei16_v_i32m1x5(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei16_v_i64m1x5(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei16_v_i64m1x5(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5(const uint8_t *base, vuint16m1_t binde 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_m(vbool64_t mask, 
const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32m1x5_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t 
test_vloxseg5ei16_v_u16mf4x5_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei32.c index 09aec05409e5f..6af4926f970c1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5(const int8_t *base, vuint32m2_t bindex, // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei32_v_i8m1x5(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei32_v_i8m1x5(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei32_v_i16m1x5(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei32_v_i16m1x5(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei32_v_i32m1x5(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei32_v_i32m1x5(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei32_v_i64m1x5(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei32_v_i64m1x5(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5(const uint32_t *base, vuint32mf2_t b // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t 
test_vloxseg5ei32_v_f32m1x5_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t 
test_vloxseg5ei32_v_u16mf2x5_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei64.c index 69215e4c99d25..460f0d4416174 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei64_v_i8m1x5(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei64_v_i8m1x5(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vloxseg5ei64_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei64_v_i16m1x5(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei64_v_i16m1x5(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t 
test_vloxseg5ei64_v_i32mf2x5(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei64_v_i32m1x5(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei64_v_i32m1x5(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei64_v_i64m1x5(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei64_v_i64m1x5(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t 
test_vloxseg5ei64_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t 
test_vloxseg5ei64_v_u16m1x5_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei8.c index 92e821a87589b..711f4a2666fca 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei8.c @@ -10,7 +10,7 @@ // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei8_v_i8m1x5(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei8_v_i8m1x5(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei8_v_i16m1x5(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei8_v_i16m1x5(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei8_v_i32m1x5(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei8_v_i32m1x5(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei8_v_i64m1x5(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei8_v_i64m1x5(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t 
test_vloxseg5ei8_v_u8mf2x5(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_m(vbool64_t mask, const 
int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_m(vbool64_t mask, const uint16_t 
*base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei16.c index 0f01011cc765f..1fc8a91b2edef 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6(const int8_t *base, 
vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei16_v_i8m1x6(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei16_v_i8m1x6(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: 
ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei16_v_i16m1x6(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei16_v_i16m1x6(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei16_v_i32m1x6(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei16_v_i32m1x6(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei16_v_i64m1x6(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei16_v_i64m1x6(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6(const uint32_t *base, 
vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t 
test_vloxseg6ei16_v_f32m1x6_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t 
test_vloxseg6ei16_v_u16mf2x6_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei32.c index 4d2b7ae3fd422..d3487ebf26ba7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei32_v_i8m1x6(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei32_v_i8m1x6(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vloxseg6ei32_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei32_v_i16m1x6(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei32_v_i16m1x6(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t 
test_vloxseg6ei32_v_i32mf2x6(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei32_v_i32m1x6(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei32_v_i32m1x6(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei32_v_i64m1x6(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei32_v_i64m1x6(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t 
test_vloxseg6ei32_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t 
test_vloxseg6ei32_v_u16m1x6_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei64.c index 8805f232bd538..d365f3b5a83f9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei64.c @@ -10,7 +10,7 @@ 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei64_v_i8m1x6(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei64_v_i8m1x6(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei64_v_i16m1x6(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei64_v_i16m1x6(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei64_v_i32m1x6(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei64_v_i32m1x6(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei64_v_i64m1x6(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei64_v_i64m1x6(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // 
vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", 
, 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vloxseg6ei64_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ 
vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei8.c index 17505b7974637..88475c15b2032 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vloxseg6ei8_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6(const int8_t *base, 
vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei8_v_i8m1x6(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei8_v_i8m1x6(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei8_v_i16m1x6(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei8_v_i16m1x6(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei8_v_i32m1x6(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei8_v_i32m1x6(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei8_v_i64m1x6(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei8_v_i64m1x6(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vloxseg6ei8_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6(const 
uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, 
size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_m(vbool32_t mask, const 
uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]]
//
vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei16.c
index 3ac00aa0a868d..0265ce70e3ff0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei16.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf4x7
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
//
vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7(const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7(const _Float16 *base, vuint16mf4_t
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf2x7
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
//
vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7(const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
@@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7(const _Float16 *base, vuint16mf2_t
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16m1x7
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei16_v_i8m1x7(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei16_v_i8m1x7(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei16_v_i16m1x7(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei16_v_i16m1x7(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7(const int32_t *base, vuint16mf4_t bin 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei16_v_i32m1x7(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei16_v_i32m1x7(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei16_v_i64m1x7(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei16_v_i64m1x7(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf8x7_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t 
test_vloxseg7ei16_v_i16m1x7_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei32.c index 227fdb257f5da..8b6be68f39506 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7(const 
float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei32_v_i8m1x7(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei32_v_i8m1x7(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei32_v_i16m1x7(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei32_v_i16m1x7(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei32_v_i32m1x7(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei32_v_i32m1x7(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei32_v_i64m1x7(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei32_v_i64m1x7(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7(const uint8_t *base, vuint32m2_t binde 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_m(vbool64_t mask, 
const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32m1x7_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t 
test_vloxseg7ei32_v_u16mf4x7_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei64.c index 3354fa4f997f7..b196967a8fa34 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei64_v_i8m1x7(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei64_v_i8m1x7(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // 
vint16m1x7_t test_vloxseg7ei64_v_i16m1x7(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei64_v_i16m1x7(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei64_v_i32m1x7(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei64_v_i32m1x7(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei64_v_i64m1x7(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei64_v_i64m1x7(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t 
test_vloxseg7ei64_v_f32m1x7_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t 
test_vloxseg7ei64_v_u16mf2x7_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei8.c index 578d3b02ca8bb..0eb52a8167f07 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei8_v_i8m1x7(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei8_v_i8m1x7(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf4x7 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei8_v_i16m1x7(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei8_v_i16m1x7(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7(const int32_t *base, vuint8mf8_t bindex, 
size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei8_v_i32m1x7(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei8_v_i32m1x7(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei8_v_i64m1x7(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei8_v_i64m1x7(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 
7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf8x7_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_m(vbool16_t mask, const int16_t 
*base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_m(vbool8_t mask, const uint8_t 
*base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei16.c index 64ff6caf73bde..5bcaf76ebbb28 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vloxseg8ei16_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8(const 
int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei16_v_i8m1x8(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei16_v_i8m1x8(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei16_v_i16m1x8(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei16_v_i16m1x8(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei16_v_i32m1x8(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei16_v_i32m1x8(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei16_v_i64m1x8(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei16_v_i64m1x8(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
8) @test_vloxseg8ei16_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t 
test_vloxseg8ei16_v_u16m1x8(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ 
-310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_m(vbool64_t 
mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei32.c index 7641a5968f0c6..cc47569df80dc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8(const int8_t *base, vuint32m2_t bindex, // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei32_v_i8m1x8(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei32_v_i8m1x8(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei32_v_i16m1x8(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei32_v_i16m1x8(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei32_v_i32m1x8(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei32_v_i32m1x8(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei32_v_i64m1x8(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei32_v_i64m1x8(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8(const uint32_t *base, vuint32mf2_t b // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t 
test_vloxseg8ei32_v_f32m1x8_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t 
test_vloxseg8ei32_v_u16mf2x8_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei64.c index 49278c40023e9..f44940e4f785f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei64_v_i8m1x8(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei64_v_i8m1x8(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vloxseg8ei64_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei64_v_i16m1x8(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei64_v_i16m1x8(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t 
test_vloxseg8ei64_v_i32mf2x8(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei64_v_i32m1x8(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei64_v_i32m1x8(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei64_v_i64m1x8(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei64_v_i64m1x8(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t 
test_vloxseg8ei64_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t 
test_vloxseg8ei64_v_u16m1x8_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei8.c index 87d9b95dd1d2c..9135e43edccff 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei8.c @@ -10,7 +10,7 @@ // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei8_v_i8m1x8(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei8_v_i8m1x8(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei8_v_i16m1x8(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei8_v_i16m1x8(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei8_v_i32m1x8(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei8_v_i32m1x8(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei8_v_i64m1x8(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei8_v_i64m1x8(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t 
test_vloxseg8ei8_v_u8mf2x8(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_m(vbool64_t mask, const 
int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_m(vbool64_t mask, const uint16_t 
*base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e16ff.c index 86bbf882d95f1..88265f5e04c07 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2(const _Float16 *base, size_t *new_vl // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2(const _Float16 *base, size_t *new_vl // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2(const _Float16 *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2(const _Float16 *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 
0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2(const _Float16 *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2(const int16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2(const int16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint16m1x2_t test_vlseg2e16ff_v_i16m1x2(const int16_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint16m2x2_t test_vlseg2e16ff_v_i16m2x2(const int16_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint16m4x2_t test_vlseg2e16ff_v_i16m4x2(const int16_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2(const uint16_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2(const uint16_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2(const uint16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2(const uint16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vuint16m4x2_t 
test_vlseg2e16ff_v_u16m4x2(const uint16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_m(vbool16_t mask, const int16_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_m(vbool8_t mask, const int16_t *base, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_m(vbool4_t mask, const int16_t *base, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e32ff.c index 2696673b9e3f2..cef7749bc8394 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2(const float *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2(const float *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2(const float *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat32m4x2_t 
test_vlseg2e32ff_v_f32m4x2(const float *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2(const int32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint32m1x2_t test_vlseg2e32ff_v_i32m1x2(const int32_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint32m2x2_t test_vlseg2e32ff_v_i32m2x2(const int32_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint32m4x2_t test_vlseg2e32ff_v_i32m4x2(const int32_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2(const uint32_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2(const uint32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } 
[[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2(const uint32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2(const uint32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_m(vbool32_t mask, const float *base, s // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_m(vbool16_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_m(vbool8_t mask, const float *base, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_m(vbool32_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_m(vbool16_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_m(vbool8_t mask, const int32_t *base, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e64ff.c index d4088f0eff4fd..9cf118919fd3d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2(const double *base, size_t *new_vl, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2(const double *base, size_t *new_vl, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2(const double *base, size_t *new_vl, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint64m1x2_t test_vlseg2e64ff_v_i64m1x2(const int64_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m2x2_t test_vlseg2e64ff_v_i64m2x2(const int64_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint64m4x2_t 
test_vlseg2e64ff_v_i64m4x2(const int64_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2(const uint64_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2(const uint64_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2(const uint64_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_m(vbool64_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_m(vbool32_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_m(vbool16_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e8ff.c index de45d9c0e093e..baa435fbb2bf5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x2_t 
test_vlseg2e8ff_v_i8m1x2(const int8_t *base, size_t *new_vl, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint8m2x2_t test_vlseg2e8ff_v_i8m2x2(const int8_t *base, size_t *new_vl, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint8m4x2_t test_vlseg2e8ff_v_i8m4x2(const int8_t *base, size_t *new_vl, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2(const uint8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } 
[[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2(const uint8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2(const uint8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, siz // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_m(vbool8_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_m(vbool4_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_m(vbool2_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, size // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, size // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e16ff.c index d49604e9db3d4..2e38ceecff279 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3(const _Float16 *base, size_t *new_vl // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3(const _Float16 *base, size_t *new_vl // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3(const _Float16 *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3(const _Float16 *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3(const int16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16mf2x3_t 
test_vlseg3e16ff_v_i16mf2x3(const int16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint16m1x3_t test_vlseg3e16ff_v_i16m1x3(const int16_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint16m2x3_t test_vlseg3e16ff_v_i16m2x3(const int16_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3(const uint16_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3(const uint16_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3(const uint16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3(const uint16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_m(vbool16_t mask, const int16_t *base, s // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_m(vbool8_t mask, const int16_t *base, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e32ff.c index 641c2c413c023..df2d985516953 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3(const float *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3(const float *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3(const float *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3(const int32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint32m1x3_t test_vlseg3e32ff_v_i32m1x3(const int32_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint32m2x3_t test_vlseg3e32ff_v_i32m2x3(const int32_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3(const uint32_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint32m1x3_t 
test_vlseg3e32ff_v_u32m1x3(const uint32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3(const uint32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_m(vbool32_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_m(vbool16_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_m(vbool32_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_m(vbool16_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e64ff.c index 21dcf0c915886..d17acb73435c6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3(const double *base, size_t *new_vl, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3(const double *base, size_t *new_vl, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint64m1x3_t test_vlseg3e64ff_v_i64m1x3(const int64_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint64m2x3_t test_vlseg3e64ff_v_i64m2x3(const int64_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3(const uint64_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint64m2x3_t 
test_vlseg3e64ff_v_u64m2x3(const uint64_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_m(vbool64_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_m(vbool32_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e8ff.c index 9da1efa32cddf..fc62baa37410b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e8ff.c @@ -10,7 
+10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x3_t test_vlseg3e8ff_v_i8m1x3(const int8_t *base, size_t *new_vl, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint8m2x3_t test_vlseg3e8ff_v_i8m2x3(const int8_t *base, size_t *new_vl, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3(const uint8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3(const uint8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_m(vbool8_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_m(vbool4_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], 
[[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, size // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e16ff.c index ec495e9040729..cfda2cd5d87f0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4(const _Float16 *base, size_t *new_vl // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4(const _Float16 *base, size_t *new_vl // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: 
store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4(const _Float16 *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4(const _Float16 *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4(const int16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4(const int16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint16m1x4_t test_vlseg4e16ff_v_i16m1x4(const int16_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint16m2x4_t test_vlseg4e16ff_v_i16m2x4(const int16_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4(const uint16_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4(const uint16_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4(const uint16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4(const uint16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_m(vbool16_t mask, const int16_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], 
[[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_m(vbool8_t mask, const int16_t *base, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e32ff.c index faee24b3e5a41..ac403e192d282 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4(const float *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4(const float *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4(const float *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4(const int32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ 
-75,7 +75,7 @@ vint32m1x4_t test_vlseg4e32ff_v_i32m1x4(const int32_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint32m2x4_t test_vlseg4e32ff_v_i32m2x4(const int32_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4(const uint32_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4(const uint32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4(const uint32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_m(vbool32_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_m(vbool16_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_m(vbool32_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_m(vbool16_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], 
align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e64ff.c index e8a856ae096cb..47d2eb8ece5dc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4(const double *base, size_t *new_vl, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4(const double *base, size_t *new_vl, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint64m1x4_t test_vlseg4e64ff_v_i64m1x4(const 
int64_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint64m2x4_t test_vlseg4e64ff_v_i64m2x4(const int64_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4(const uint64_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4(const uint64_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_m(vbool64_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_m(vbool32_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e8ff.c index bb268a09abe00..d784020fb1945 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x4_t test_vlseg4e8ff_v_i8m1x4(const int8_t *base, size_t *new_vl, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint8m2x4_t test_vlseg4e8ff_v_i8m2x4(const int8_t *base, size_t *new_vl, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4(const uint8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4(const uint8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_m(vbool8_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_m(vbool4_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], 
align 8 @@ -231,7 +231,7 @@ vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, size // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e16ff.c index 
f3d6ba3f9ef72..d6db2a8a24246 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5(const _Float16 *base, size_t *new_vl // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5(const _Float16 *base, size_t *new_vl // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5(const _Float16 *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5(const int16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5(const int16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16m1x5_t test_vlseg5e16ff_v_i16m1x5(const int16_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 
5) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5(const uint16_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5(const uint16_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5(const uint16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat16mf4x5_t 
test_vlseg5e16ff_v_f16mf4x5_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_m(vbool16_t mask, const int16_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } 
@llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e32ff.c index 61be82853a7a6..8eb139a92e9c3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5(const float *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5(const float *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5(const int32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x5_t test_vlseg5e32ff_v_i32m1x5(const int32_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5(const uint32_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5(const uint32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat32m1x5_t 
test_vlseg5e32ff_v_f32m1x5_m(vbool32_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_m(vbool32_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e64ff.c index 916bf4bb0cc05..395c509859b03 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5(const double *base, size_t *new_vl, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x5_t test_vlseg5e64ff_v_i64m1x5(const int64_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5(const uint64_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_m(vbool64_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } 
@llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e8ff.c index 24e2238855679..d000a1b74c2e2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } 
@llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x5_t test_vlseg5e8ff_v_i8m1x5(const int8_t *base, size_t *new_vl, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5(const uint8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf4x5_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_m(vbool8_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e16ff.c index c2a4741417330..77558b7673bc2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6(const _Float16 *base, size_t *new_vl // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6(const _Float16 *base, size_t *new_vl // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6(const _Float16 *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6(const int16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6(const int16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ 
-88,7 +88,7 @@ vint16m1x6_t test_vlseg6e16ff_v_i16m1x6(const int16_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6(const uint16_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6(const uint16_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6(const uint16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_m(vbool16_t mask, const int16_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e32ff.c index 257494a777077..af2a9902c9bf6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6(const float *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6(const float *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6(const int32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x6_t 
test_vlseg6e32ff_v_i32m1x6(const int32_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6(const uint32_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6(const uint32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_m(vbool32_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_m(vbool32_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e64ff.c index fc84de20d20e7..0d7a2ef2e19c4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6(const double *base, size_t *new_vl, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr 
[[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x6_t test_vlseg6e64ff_v_i64m1x6(const int64_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6(const uint64_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_m(vbool64_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e8ff.c index 64469b56357c3..f91064648e733 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x6_t test_vlseg6e8ff_v_i8m1x6(const int8_t *base, size_t *new_vl, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6(const uint8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), 
i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_m(vbool8_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 
8 @@ -205,7 +205,7 @@ vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e16ff.c index f16e557208879..66d06b24f2746 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7(const _Float16 *base, size_t *new_vl // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], 
align 8 @@ -36,7 +36,7 @@ vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7(const _Float16 *base, size_t *new_vl // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7(const _Float16 *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7(const int16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7(const int16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } 
@llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16m1x7_t test_vlseg7e16ff_v_i16m1x7(const int16_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7(const uint16_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7(const uint16_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } 
[[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7(const uint16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat16m1x7_t 
test_vlseg7e16ff_v_f16m1x7_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_m(vbool16_t mask, const int16_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e32ff.c index 64711d8bb6cb2..864b784f96fb9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e32ff.c @@ -10,7 
+10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7(const float *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7(const float *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7(const int32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr 
[[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x7_t test_vlseg7e32ff_v_i32m1x7(const int32_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7(const uint32_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7(const uint32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_m(vbool32_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint32m1x7_t 
test_vlseg7e32ff_v_i32m1x7_m(vbool32_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e64ff.c index 68e845110c4ec..6e8994cc89cf8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7(const double *base, size_t *new_vl, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x7_t test_vlseg7e64ff_v_i64m1x7(const int64_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7(const uint64_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_m(vbool64_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e8ff.c index 66102fe6482e0..c04347259deaf 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x7_t test_vlseg7e8ff_v_i8m1x7(const int8_t *base, size_t *new_vl, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } 
@llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8m1x7_t 
test_vlseg7e8ff_v_u8m1x7(const uint8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_m(vbool8_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } 
@llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e16ff.c index 3db74ec9b6326..7ca95d7f58d73 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8(const _Float16 *base, size_t *new_vl // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8(const _Float16 *base, size_t *new_vl // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8(const _Float16 *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8(const int16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8(const int16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16m1x8_t test_vlseg8e16ff_v_i16m1x8(const int16_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8(const uint16_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8(const uint16_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16m1x8 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8(const uint16_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } 
@llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_m(vbool16_t mask, const int16_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e32ff.c index 3d63354ac2573..0c83da1d00cc3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8(const float *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8(const float *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8(const int32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x8_t test_vlseg8e32ff_v_i32m1x8(const int32_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8(const uint32_t *base, size_t *new_vl, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8(const uint32_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32mf2x8_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_m(vbool32_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 
} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_m(vbool32_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e64ff.c index dff16c7e09537..b2253ef0f47b4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8(const double *base, size_t *new_vl, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x8_t test_vlseg8e64ff_v_i64m1x8(const int64_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8(const uint64_t *base, size_t *new_vl, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } 
@llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_m(vbool64_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e8ff.c index efebd4d789de6..46b4f6ab6b579 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8(const int8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x8_t test_vlseg8e8ff_v_i8m1x8(const int8_t *base, size_t *new_vl, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8(const uint8_t *base, size_t *new_vl, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8(const uint8_t *base, size_t *new_vl, size_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 
8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_m(vbool8_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei16.c index cdebec226b9d5..0dad625a30ddb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2(const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2(const _Float16 *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) 
poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2(const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2(const _Float16 *base, vuint16m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2(const float *base, vuint16m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2(const float *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2(const float *base, vuint16m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2(const float *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2(const double *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2(const double *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t 
test_vluxseg2ei16_v_f64m4x2(const double *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], 
i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei16_v_i8m1x2(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vluxseg2ei16_v_i8m1x2(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei16_v_i8m2x2(const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vluxseg2ei16_v_i8m2x2(const int8_t *base, vuint16m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei16_v_i8m4x2(const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vluxseg2ei16_v_i8m4x2(const int8_t *base, vuint16m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei16_v_i16m1x2(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vluxseg2ei16_v_i16m1x2(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei16_v_i16m2x2(const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vluxseg2ei16_v_i16m2x2(const int16_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei16_v_i16m4x2(const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vluxseg2ei16_v_i16m4x2(const int16_t *base, vuint16m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei16_v_i32m1x2(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vluxseg2ei16_v_i32m1x2(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei16_v_i32m2x2(const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vluxseg2ei16_v_i32m2x2(const int32_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei16_v_i32m4x2(const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vluxseg2ei16_v_i32m4x2(const 
int32_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei16_v_i64m1x2(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vluxseg2ei16_v_i64m1x2(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei16_v_i64m2x2(const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vluxseg2ei16_v_i64m2x2(const int64_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei16_v_i64m4x2(const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vluxseg2ei16_v_i64m4x2(const int64_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2(const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2(const uint8_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2(const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2(const uint8_t *base, vuint16m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2(const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2(const uint16_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2(const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2(const uint16_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t 
test_vluxseg2ei16_v_u32mf2x2(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2(const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2(const uint32_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2(const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2(const uint32_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2(const uint64_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2(const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2(const uint64_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_m(vbool4_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m4x2_m 
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_m(vbool8_t mask, const float *base, vuint16m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_m(vbool8_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_m(vbool16_t mask, const double *base, vuint16m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t 
test_vluxseg2ei16_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_m(vbool4_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_m(vbool8_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_m(vbool16_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_m(vbool32_t 
mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, 
i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m1x2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_m(vbool32_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei32.c index 10f44bbbe2106..fd3b01ec7113e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2(const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2(const _Float16 *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2(const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2(const _Float16 *base, vuint32m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2(const float *base, vuint32m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2(const float *base, vuint32m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2(const float *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2(const float *base, vuint32m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2(const float *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2(const double *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2(const double *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // 
vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2(const double *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2(const double *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei32_v_i8m1x2(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vluxseg2ei32_v_i8m1x2(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei32_v_i8m2x2(const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vluxseg2ei32_v_i8m2x2(const int8_t *base, vuint32m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei32_v_i16m1x2(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16m1x2_t test_vluxseg2ei32_v_i16m1x2(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei32_v_i16m2x2(const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m2x2_t test_vluxseg2ei32_v_i16m2x2(const int16_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei32_v_i16m4x2(const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m4x2_t test_vluxseg2ei32_v_i16m4x2(const int16_t *base, vuint32m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei32_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei32_v_i32m1x2(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32m1x2_t test_vluxseg2ei32_v_i32m1x2(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei32_v_i32m2x2(const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m2x2_t test_vluxseg2ei32_v_i32m2x2(const int32_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei32_v_i32m4x2(const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m4x2_t test_vluxseg2ei32_v_i32m4x2(const int32_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t 
test_vluxseg2ei32_v_i64m1x2(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint64m1x2_t test_vluxseg2ei32_v_i64m1x2(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei32_v_i64m2x2(const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m2x2_t test_vluxseg2ei32_v_i64m2x2(const int64_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei32_v_i64m4x2(const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m4x2_t test_vluxseg2ei32_v_i64m4x2(const int64_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2(const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2(const uint8_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2(const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2(const uint16_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2(const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2(const uint16_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] 
// vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2(const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2(const uint32_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2(const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2(const uint32_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2(const uint64_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2(const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2(const uint64_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_m(vbool16_t 
mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_m(vbool4_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_m(vbool8_t mask, const float *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_m(vbool8_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_m(vbool16_t mask, const double *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_m(vbool16_t 
mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_m(vbool4_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_m(vbool16_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint16m1x2_t 
test_vluxseg2ei32_v_u16m1x2_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_m(vbool32_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei64.c index 8d9c86a0918ac..d21cf31a26883 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2(const _Float16 
*base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2(const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2(const _Float16 *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2(const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2(const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2(const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2(const float *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2(const float *base, vuint64m8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2(const float *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2(const double *base, vuint64m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2(const double *base, vuint64m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2(const double *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2(const double *base, vuint64m4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2(const double *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei64_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei64_v_i8m1x2(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8m1x2_t test_vluxseg2ei64_v_i8m1x2(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2(const 
int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei64_v_i16m1x2(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m1x2_t test_vluxseg2ei64_v_i16m1x2(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei64_v_i16m2x2(const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16m2x2_t test_vluxseg2ei64_v_i16m2x2(const int16_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei64_v_i32m1x2(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m1x2_t test_vluxseg2ei64_v_i32m1x2(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei64_v_i32m2x2(const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint32m2x2_t test_vluxseg2ei64_v_i32m2x2(const int32_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei64_v_i32m4x2(const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32m4x2_t test_vluxseg2ei64_v_i32m4x2(const int32_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei64_v_i64m1x2(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint64m1x2_t test_vluxseg2ei64_v_i64m1x2(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei64_v_i64m2x2(const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint64m2x2_t test_vluxseg2ei64_v_i64m2x2(const int64_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei64_v_i64m4x2(const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint64m4x2_t test_vluxseg2ei64_v_i64m4x2(const int64_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei64_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t 
test_vluxseg2ei64_v_u16mf4x2(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2(const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2(const uint16_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2(const uint32_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2(const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2(const uint32_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2(const uint64_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2(const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2(const uint64_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat16mf4x2_t 
test_vluxseg2ei64_v_f16mf4x2_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_m(vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_m(vbool8_t mask, const float *base, vuint64m8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_m(vbool8_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_m(vbool16_t mask, const double *base, vuint64m4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_m(vbool16_t mask, 
const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_m(vbool8_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_m(vbool16_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x2_t 
test_vluxseg2ei64_v_u32mf2x2_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_m(vbool32_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei8.c index a02c5e19eafd0..ee07437d56e73 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2(const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2(const _Float16 *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei8_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2(const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2(const _Float16 *base, vuint8m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2(const float 
*base, vuint8mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2(const float *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2(const float *base, vuint8m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2(const float *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2(const double *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2(const double *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2(const double *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2(const double *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei8_v_i8m1x2(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vluxseg2ei8_v_i8m1x2(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei8_v_i8m2x2(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vluxseg2ei8_v_i8m2x2(const int8_t *base, vuint8m2_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei8_v_i8m4x2(const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vluxseg2ei8_v_i8m4x2(const int8_t *base, vuint8m4_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei8_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei8_v_i16m1x2(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vluxseg2ei8_v_i16m1x2(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei8_v_i16m2x2(const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vluxseg2ei8_v_i16m2x2(const int16_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei8_v_i16m4x2(const int16_t 
*base, vuint8m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vluxseg2ei8_v_i16m4x2(const int16_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei8_v_i32m1x2(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vluxseg2ei8_v_i32m1x2(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei8_v_i32m2x2(const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vluxseg2ei8_v_i32m2x2(const int32_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei8_v_i32m4x2(const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vluxseg2ei8_v_i32m4x2(const int32_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei8_v_i64m1x2(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vluxseg2ei8_v_i64m1x2(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei8_v_i64m2x2(const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vluxseg2ei8_v_i64m2x2(const int64_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei8_v_i64m4x2(const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vluxseg2ei8_v_i64m4x2(const int64_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei8_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2(const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2(const uint8_t *base, vuint8m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2(const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2(const uint8_t *base, vuint8m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2(const 
uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2(const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2(const uint16_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2(const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2(const uint16_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2(const uint32_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2(const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2(const uint32_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2(const uint64_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2(const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2(const uint64_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_m(vbool64_t mask, const 
_Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_m(vbool16_t 
mask, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_m(vbool16_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_m(vbool8_t mask, const float *base, vuint8m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_m(vbool8_t mask, const float *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_m(vbool16_t mask, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 
3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_m(vbool32_t mask, const 
int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_m(vbool16_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_m(vbool32_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_m(vbool16_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_m(vbool32_t mask, const 
uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -950,7 +950,7 @@ vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_m(vbool64_t mask, const uint64_t *base,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m2x2_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -960,7 +960,7 @@ vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_m(vbool32_t mask, const uint64_t *base,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m4x2_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei16.c
index 3b47631d46b85..f1e0292efb164 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei16.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf4x3
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr
[[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3(const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3(const _Float16 *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) 
poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3(const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3(const float *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3(const double *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t 
test_vluxseg3ei16_v_i8mf2x3(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei16_v_i8m1x3(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei16_v_i8m1x3(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei16_v_i8m2x3(const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vluxseg3ei16_v_i8m2x3(const int8_t *base, vuint16m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei16_v_i16m1x3(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vluxseg3ei16_v_i16m1x3(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei16_v_i16m2x3(const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vluxseg3ei16_v_i16m2x3(const int16_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei16_v_i32m1x3(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vluxseg3ei16_v_i32m1x3(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei16_v_i32m2x3(const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vluxseg3ei16_v_i32m2x3(const int32_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei16_v_i64m1x3(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vluxseg3ei16_v_i64m1x3(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei16_v_i64m2x3(const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vluxseg3ei16_v_i64m2x3(const int64_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t 
test_vluxseg3ei16_v_u8m1x3(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3(const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3(const uint8_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3(const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3(const uint16_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) 
poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3(const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3(const uint32_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3(const uint64_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", 
, 3) @test_vluxseg3ei16_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) 
{ @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_m(vbool32_t 
mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, 
i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei32.c index 7f7ee7b151e34..691a413751527 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3(const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3(const _Float16 *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3(const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3(const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3(const float *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // 
vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3(const double *base, vuint32m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3(const double *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei32_v_i8m1x3(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei32_v_i8m1x3(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei32_v_i8m2x3(const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vluxseg3ei32_v_i8m2x3(const int8_t *base, vuint32m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei32_v_i16m1x3(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vluxseg3ei32_v_i16m1x3(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei32_v_i16m2x3(const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vluxseg3ei32_v_i16m2x3(const int16_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei32_v_i32m1x3(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vluxseg3ei32_v_i32m1x3(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei32_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei32_v_i32m2x3(const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vluxseg3ei32_v_i32m2x3(const int32_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei32_v_i64m1x3(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vluxseg3ei32_v_i64m1x3(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei32_v_i64m2x3(const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vluxseg3ei32_v_i64m2x3(const int64_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t 
test_vluxseg3ei32_v_u8mf8x3(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3(const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3(const uint8_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3(const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3(const uint16_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3(const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3(const uint32_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3(const uint64_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t 
test_vluxseg3ei32_v_f32mf2x3_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_m(vbool64_t 
mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei64.c index 8a63ede1adcf3..2b6f10ddd1310 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3(const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3(const _Float16 *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3(const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3(const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3(const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3(const float *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3(const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3(const double *base, vuint64m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3(const double *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei64_v_i8m1x3(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei64_v_i8m1x3(const int8_t *base, vuint64m8_t bindex, s // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei64_v_i16m1x3(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x3_t test_vluxseg3ei64_v_i16m1x3(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei64_v_i16m2x3(const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x3_t test_vluxseg3ei64_v_i16m2x3(const int16_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei64_v_i32m1x3(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32m1x3_t test_vluxseg3ei64_v_i32m1x3(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei64_v_i32m2x3(const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x3_t test_vluxseg3ei64_v_i32m2x3(const int32_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei64_v_i64m1x3(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x3_t test_vluxseg3ei64_v_i64m1x3(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei64_v_i64m2x3(const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x3_t test_vluxseg3ei64_v_i64m2x3(const int64_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3(const uint16_t *base, vuint64m2_t bi // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3(const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3(const uint16_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3(const uint32_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3(const uint64_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_m(vbool8_t mask, const 
_Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_m(vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf2x3_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x3_t 
test_vluxseg3ei64_v_i64m1x3_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei8.c index 3abeb0593f6cc..6f48b3e4d0771 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3(const _Float16 *base, vuint8mf2_t bindex, 
size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3(const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3(const _Float16 *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 
3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3(const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3(const float *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3(const double *base, vuint8mf4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3(const double *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei8_v_i8m1x3(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei8_v_i8m1x3(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei8_v_i8m2x3(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vluxseg3ei8_v_i8m2x3(const int8_t *base, vuint8m2_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei8_v_i16m1x3(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vluxseg3ei8_v_i16m1x3(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei8_v_i16m2x3(const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vluxseg3ei8_v_i16m2x3(const int16_t *base, vuint8m1_t 
bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei8_v_i32m1x3(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vluxseg3ei8_v_i32m1x3(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei8_v_i32m2x3(const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vluxseg3ei8_v_i32m2x3(const int32_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei8_v_i64m1x3(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vluxseg3ei8_v_i64m1x3(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei8_v_i64m2x3(const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vluxseg3ei8_v_i64m2x3(const int64_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3(const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3(const uint8_t *base, vuint8m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3(const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3(const uint16_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3(const uint32_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t 
test_vluxseg3ei8_v_u64m2x3(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3(const uint64_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_m(vbool16_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf4x3_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_m(vbool16_t mask, const 
int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_m(vbool16_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_m(vbool32_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_m(vbool64_t mask, const 
uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei16.c index 26a309e2af78f..47cb7275b9acb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4(const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4(const 
_Float16 *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4(const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4(const float *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4(const double *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei16_v_i8m1x4(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei16_v_i8m1x4(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei16_v_i8m2x4(const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vluxseg4ei16_v_i8m2x4(const int8_t *base, vuint16m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei16_v_i16m1x4(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vluxseg4ei16_v_i16m1x4(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei16_v_i16m2x4(const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vluxseg4ei16_v_i16m2x4(const int16_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4(const int32_t *base, vuint16mf4_t bin 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei16_v_i32m1x4(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vluxseg4ei16_v_i32m1x4(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei16_v_i32m2x4(const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vluxseg4ei16_v_i32m2x4(const int32_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei16_v_i64m1x4(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vluxseg4ei16_v_i64m1x4(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei16_v_i64m2x4(const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vluxseg4ei16_v_i64m2x4(const int64_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4(const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4(const uint8_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4(const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4(const uint16_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4(const uint32_t *base, vuint16mf2_t 
bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4(const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4(const uint32_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4(const uint64_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei16_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ 
-490,7 +490,7 @@ vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_m(vbool4_t mask, 
const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 
5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei32.c index b00c95f63bdb4..fc0351ec70d0e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4(const _Float16 
*base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4(const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4(const _Float16 *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4(const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4(const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4(const float *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4(const double *base, vuint32m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4(const double *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei32_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei32_v_i8m1x4(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei32_v_i8m1x4(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei32_v_i8m2x4(const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vluxseg4ei32_v_i8m2x4(const int8_t *base, vuint32m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t 
test_vluxseg4ei32_v_i16mf2x4(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei32_v_i16m1x4(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vluxseg4ei32_v_i16m1x4(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei32_v_i16m2x4(const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vluxseg4ei32_v_i16m2x4(const int16_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei32_v_i32m1x4(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vluxseg4ei32_v_i32m1x4(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei32_v_i32m2x4(const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vluxseg4ei32_v_i32m2x4(const int32_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei32_v_i64m1x4(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vluxseg4ei32_v_i64m1x4(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei32_v_i64m2x4(const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vluxseg4ei32_v_i64m2x4(const int64_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei32_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4(const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4(const uint8_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t 
test_vluxseg4ei32_v_u16m1x4(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4(const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4(const uint16_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4(const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4(const uint32_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4(const uint64_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_m(vbool64_t 
mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_m(vbool32_t 
mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 
4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei64.c index 49c05bb5619b3..25edf108b58d8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4(const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4(const _Float16 *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4(const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4(const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4(const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4(const float *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4(const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // 
vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4(const double *base, vuint64m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4(const double *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei64_v_i8m1x4(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei64_v_i8m1x4(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei64_v_i16m1x4(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x4_t test_vluxseg4ei64_v_i16m1x4(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei64_v_i16m2x4(const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x4_t test_vluxseg4ei64_v_i16m2x4(const int16_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei64_v_i32m1x4(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32m1x4_t test_vluxseg4ei64_v_i32m1x4(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei64_v_i32m2x4(const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x4_t test_vluxseg4ei64_v_i32m2x4(const int32_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei64_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei64_v_i64m1x4(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x4_t test_vluxseg4ei64_v_i64m1x4(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei64_v_i64m2x4(const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x4_t test_vluxseg4ei64_v_i64m2x4(const int64_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t 
test_vluxseg4ei64_v_u8mf4x4(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4(const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4(const uint16_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4(const uint32_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4(const uint64_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_m(vbool16_t mask, 
const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32mf2x4_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x4_t 
test_vluxseg4ei64_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei8.c index cd37135698b07..91f8613fefc45 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) 
poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4(const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4(const _Float16 *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4(const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4(const float *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4(const double *base, vuint8mf4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4(const double *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4(const int8_t *base, 
vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei8_v_i8m1x4(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei8_v_i8m1x4(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei8_v_i8m2x4(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vluxseg4ei8_v_i8m2x4(const int8_t *base, vuint8m2_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei8_v_i16m1x4(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vluxseg4ei8_v_i16m1x4(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei8_v_i16m2x4(const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vluxseg4ei8_v_i16m2x4(const int16_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei8_v_i32m1x4(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vluxseg4ei8_v_i32m1x4(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei8_v_i32m2x4(const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vluxseg4ei8_v_i32m2x4(const int32_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei8_v_i64m1x4(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vluxseg4ei8_v_i64m1x4(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei8_v_i64m2x4(const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vluxseg4ei8_v_i64m2x4(const int64_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4(const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4(const uint8_t *base, vuint8m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t 
test_vluxseg4ei8_v_u16mf4x4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4(const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4(const uint16_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4(const uint32_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4(const uint64_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t 
test_vluxseg4ei8_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_m(vbool16_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m2x4_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_m(vbool32_t mask, const int32_t 
*base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_m(vbool16_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_m(vbool32_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_m(vbool8_t mask, const uint8_t 
*base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei16.c index 105b6af9ba7e8..2292a895252f5 
100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei16_v_i8m1x5(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei16_v_i8m1x5(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei16_v_i16m1x5(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei16_v_i16m1x5(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) 
[[TMP0]] // vint32m1x5_t test_vluxseg5ei16_v_i32m1x5(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei16_v_i32m1x5(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei16_v_i64m1x5(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei16_v_i64m1x5(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_m(vbool64_t 
mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf4x5_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ 
vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei32.c index bc5fad7190a88..59d9d1d16edae 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vluxseg5ei32_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t 
test_vluxseg5ei32_v_f32m1x5(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei32_v_i8m1x5(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei32_v_i8m1x5(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei32_v_i16m1x5(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei32_v_i16m1x5(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei32_v_i32m1x5(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei32_v_i32m1x5(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei32_v_i64m1x5(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei32_v_i64m1x5(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vluxseg5ei32_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t 
test_vluxseg5ei32_v_u8m1x5(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_m(vbool32_t mask, 
const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t 
test_vluxseg5ei32_v_u32m1x5_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei64.c index 21d245fe45282..8734285678df8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t 
test_vluxseg5ei64_v_f64m1x5(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei64_v_i8m1x5(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei64_v_i8m1x5(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei64_v_i16m1x5(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei64_v_i16m1x5(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei64_v_i32m1x5(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei64_v_i32m1x5(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei64_v_i64m1x5(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei64_v_i64m1x5(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5(const uint16_t 
*base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vluxseg5ei64_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 
+430,7 @@ vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei8.c index d52b6301a9479..cd73cdf76df47 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei8_v_i8m1x5(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei8_v_i8m1x5(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
5) @test_vluxseg5ei8_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei8_v_i16m1x5(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei8_v_i16m1x5(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei8_v_i32m1x5(const 
int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei8_v_i32m1x5(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei8_v_i64m1x5(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei8_v_i64m1x5(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vluxseg5ei8_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ 
vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_m(vbool64_t 
mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei16.c index ac0caeea5d0c5..2f1b6843ba9e8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6(const float *base, vuint16mf2_t bindex, size_t vl) { @@ 
-60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) 
poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei16_v_i8m1x6(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei16_v_i8m1x6(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) 
poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei16_v_i16m1x6(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei16_v_i16m1x6(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei16_v_i32m1x6(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei16_v_i32m1x6(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei16_v_i64m1x6(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei16_v_i64m1x6(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t 
test_vluxseg6ei16_v_u8m1x6(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf2x6_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t 
test_vluxseg6ei16_v_i32m1x6_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei32.c index ab475ae2b55c0..819d8328eca1d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vluxseg6ei32_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei32_v_i8m1x6(const int8_t 
*base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei32_v_i8m1x6(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei32_v_i16m1x6(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei32_v_i16m1x6(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei32_v_i32m1x6(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei32_v_i32m1x6(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei32_v_i64m1x6(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei32_v_i64m1x6(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vluxseg6ei32_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t 
test_vluxseg6ei32_v_u32m1x6(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_m(vbool64_t mask, 
const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]]
//
vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
@@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_m(vbool16_t mask, const uint16_t *base
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32mf2x6_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]]
//
vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -510,7 +510,7 @@ vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_m(vbool64_t mask, const uint32_t *ba
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32m1x6_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]]
//
vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
@@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_m(vbool32_t mask, const uint32_t *base
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u64m1x6_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]]
//
vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei64.c
index f50227ec8ecfd..1be7dc4a50cd0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei64_v_i8m1x6(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei64_v_i8m1x6(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei64_v_i16m1x6(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei64_v_i16m1x6(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] 
// vint32m1x6_t test_vluxseg6ei64_v_i32m1x6(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei64_v_i32m1x6(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei64_v_i64m1x6(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei64_v_i64m1x6(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_m(vbool64_t mask, const 
int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf4x6_m // CHECK-RV64-SAME: 
( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ 
vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei8.c index efa2d311f24d0..ec9b1b9dc15be 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vluxseg6ei8_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6(const float 
*base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei8_v_i8m1x6(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei8_v_i8m1x6(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei8_v_i16m1x6(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei8_v_i16m1x6(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei8_v_i32m1x6(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei8_v_i32m1x6(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei8_v_i64m1x6(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei8_v_i64m1x6(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vluxseg6ei8_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6(const uint8_t *base, 
vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, 
vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_m(vbool32_t mask, const int32_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: 
ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei16.c index 2bbfa0cb5a6e3..092b0b5999c59 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei16_v_i8m1x7(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei16_v_i8m1x7(const int8_t *base, 
vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei16_v_i16m1x7(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei16_v_i16m1x7(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei16_v_i32m1x7(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei16_v_i32m1x7(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei16_v_i64m1x7(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei16_v_i64m1x7(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7(const uint32_t *base, 
vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { 
@@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t 
test_vluxseg7ei16_v_u16m1x7_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei32.c index 3e5ef9436e96b..a47d057326dac 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei32.c @@ -10,7 +10,7 
@@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei32_v_i8m1x7(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei32_v_i8m1x7(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei32_v_i16m1x7(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei32_v_i16m1x7(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei32_v_i32m1x7(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei32_v_i32m1x7(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei32_v_i64m1x7(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei32_v_i64m1x7(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) 
[[TMP0]] // vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_m(vbool64_t 
mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t 
test_vluxseg7ei32_v_u32mf2x7_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei64.c index 8eec673bc7f17..40e749bf60545 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vluxseg7ei64_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t 
test_vluxseg7ei64_v_f32m1x7(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei64_v_i8m1x7(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei64_v_i8m1x7(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei64_v_i16m1x7(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei64_v_i16m1x7(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei64_v_i32m1x7(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei64_v_i32m1x7(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei64_v_i64m1x7(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei64_v_i64m1x7(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vluxseg7ei64_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t 
test_vluxseg7ei64_v_u8m1x7(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_m(vbool32_t mask, 
const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", 
, 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t 
test_vluxseg7ei64_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", 
, 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vluxseg7ei64_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei8.c index b0e042424cb8e..a813871a82ab3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vluxseg7ei8_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei8_v_i8m1x7(const int8_t *base, vuint8m1_t 
bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei8_v_i8m1x7(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei8_v_i16m1x7(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei8_v_i16m1x7(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei8_v_i32m1x7(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei8_v_i32m1x7(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei8_v_i64m1x7(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei8_v_i64m1x7(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vluxseg7ei8_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7(const 
uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_m(vbool64_t mask, const int64_t 
*base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_m(vbool16_t mask, const 
uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei16.c index 55efc4f24ae09..f67c4fdfbec23 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei16.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei16_v_i8m1x8(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei16_v_i8m1x8(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vluxseg8ei16_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei16_v_i16m1x8(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei16_v_i16m1x8(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t 
test_vluxseg8ei16_v_i32m1x8(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei16_v_i32m1x8(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei16_v_i64m1x8(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei16_v_i64m1x8(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_m(vbool64_t mask, const 
int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf4x8_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ 
vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei32.c index 72acb61383bf8..5b8da945b322f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vluxseg8ei32_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t 
test_vluxseg8ei32_v_f32m1x8(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei32_v_i8m1x8(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei32_v_i8m1x8(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei32_v_i16m1x8(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei32_v_i16m1x8(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei32_v_i32m1x8(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei32_v_i32m1x8(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei32_v_i64m1x8(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei32_v_i64m1x8(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vluxseg8ei32_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t 
test_vluxseg8ei32_v_u8m1x8(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_m(vbool32_t mask, 
const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t 
test_vluxseg8ei32_v_u32m1x8_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei64.c index ee90edda7a3ba..0c18081ffb879 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t 
test_vluxseg8ei64_v_f64m1x8(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei64_v_i8m1x8(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei64_v_i8m1x8(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei64_v_i16m1x8(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei64_v_i16m1x8(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei64_v_i32m1x8(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei64_v_i32m1x8(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei64_v_i64m1x8(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei64_v_i64m1x8(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8(const uint16_t 
*base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vluxseg8ei64_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 
+430,7 @@ vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei8.c index ed44f7b08a0ea..e0e2618b94d18 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei8_v_i8m1x8(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei8_v_i8m1x8(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
8) @test_vluxseg8ei8_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei8_v_i16m1x8(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei8_v_i16m1x8(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei8_v_i32m1x8(const 
int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei8_v_i32m1x8(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei8_v_i64m1x8(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei8_v_i64m1x8(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vluxseg8ei8_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ 
vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_m(vbool64_t 
mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei16.c index 144959a762e01..629936f76b74c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16mf4x2(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg2ei16_v_f16mf4x2(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16mf2x2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg2ei16_v_f16mf2x2(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16m1x2(_Float16 *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg2ei16_v_f16m1x2(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16m2x2(_Float16 *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg2ei16_v_f16m2x2(_Float16 *base, vuint16m2_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16m4x2(_Float16 *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg2ei16_v_f16m4x2(_Float16 *base, vuint16m4_t bindex, vfloat16m4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32mf2x2(float *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg2ei16_v_f32mf2x2(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m1x2(float *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg2ei16_v_f32m1x2(float *base, vuint16mf2_t bindex, vfloat32m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m2x2(float *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg2ei16_v_f32m2x2(float *base, vuint16m1_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m4x2(float *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg2ei16_v_f32m4x2(float *base, vuint16m2_t bindex, vfloat32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m1x2(double *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg2ei16_v_f64m1x2(double *base, vuint16mf4_t bindex, vfloat64m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m2x2(double *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg2ei16_v_f64m2x2(double *base, vuint16mf2_t bindex, vfloat64m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m4x2(double *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg2ei16_v_f64m4x2(double *base, vuint16m1_t bindex, vfloat64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8mf8x2(int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg2ei16_v_i8mf8x2(int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8mf4x2(int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg2ei16_v_i8mf4x2(int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8mf2x2(int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg2ei16_v_i8mf2x2(int8_t *base, vuint16m1_t bindex, vint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m1x2(int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg2ei16_v_i8m1x2(int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m2x2(int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg2ei16_v_i8m2x2(int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m4x2(int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg2ei16_v_i8m4x2(int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16mf4x2(int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg2ei16_v_i16mf4x2(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16mf2x2(int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg2ei16_v_i16mf2x2(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m1x2(int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg2ei16_v_i16m1x2(int16_t *base, vuint16m1_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m2x2(int16_t *base, 
vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg2ei16_v_i16m2x2(int16_t *base, vuint16m2_t bindex, vint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m4x2(int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg2ei16_v_i16m4x2(int16_t *base, vuint16m4_t bindex, vint16m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32mf2x2(int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg2ei16_v_i32mf2x2(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m1x2(int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg2ei16_v_i32m1x2(int32_t *base, vuint16mf2_t bindex, vint32m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m2x2(int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void 
test_vsoxseg2ei16_v_i32m2x2(int32_t *base, vuint16m1_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m4x2(int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg2ei16_v_i32m4x2(int32_t *base, vuint16m2_t bindex, vint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i64m1x2(int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg2ei16_v_i64m1x2(int64_t *base, vuint16mf4_t bindex, vint64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i64m2x2(int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg2ei16_v_i64m2x2(int64_t *base, vuint16mf2_t bindex, vint64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i64m4x2(int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg2ei16_v_i64m4x2(int64_t *base, vuint16m1_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local 
void @test_vsoxseg2ei16_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf8x2(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg2ei16_v_u8mf8x2(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf4x2(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg2ei16_v_u8mf4x2(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf2x2(uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg2ei16_v_u8mf2x2(uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8m1x2(uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg2ei16_v_u8m1x2(uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8m2x2(uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg2ei16_v_u8m2x2(uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8m4x2(uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg2ei16_v_u8m4x2(uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16mf4x2(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg2ei16_v_u16mf4x2(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16mf2x2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg2ei16_v_u16mf2x2(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16m1x2(uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg2ei16_v_u16m1x2(uint16_t *base, vuint16m1_t bindex, vuint16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16m2x2(uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg2ei16_v_u16m2x2(uint16_t *base, vuint16m2_t bindex, vuint16m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16m4x2(uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg2ei16_v_u16m4x2(uint16_t *base, vuint16m4_t bindex, vuint16m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32mf2x2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg2ei16_v_u32mf2x2(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32m1x2(uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg2ei16_v_u32m1x2(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32m2x2(uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg2ei16_v_u32m2x2(uint32_t *base, vuint16m1_t bindex, vuint32m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32m4x2(uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg2ei16_v_u32m4x2(uint32_t *base, vuint16m2_t bindex, vuint32m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u64m1x2(uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg2ei16_v_u64m1x2(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u64m2x2(uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg2ei16_v_u64m2x2(uint64_t *base, vuint16mf2_t bindex, vuint64m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u64m4x2(uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg2ei16_v_u64m4x2(uint64_t *base, vuint16m1_t bindex, vuint64m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg2ei16_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg2ei16_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg2ei16_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg2ei16_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg2ei16_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint16m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32mf2x2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg2ei16_v_f32mf2x2_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m1x2_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m1x2_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg2ei16_v_f32m1x2_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m2x2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg2ei16_v_f32m2x2_m(vbool16_t mask, float *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m4x2_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg2ei16_v_f32m4x2_m(vbool8_t mask, float *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m1x2_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x2_t 
v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg2ei16_v_f64m1x2_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m2x2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg2ei16_v_f64m2x2_m(vbool32_t mask, double *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m4x2_m(vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg2ei16_v_f64m4x2_m(vbool16_t mask, double *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg2ei16_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 
2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg2ei16_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg2ei16_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg2ei16_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg2ei16_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint16m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg2ei16_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint16m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg2ei16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg2ei16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg2ei16_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg2ei16_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg2ei16_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint16m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg2ei16_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg2ei16_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsoxseg2ei16_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsoxseg2ei16_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsoxseg2ei16_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg2ei16_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsoxseg2ei16_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsoxseg2ei16_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsoxseg2ei16_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsoxseg2ei16_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsoxseg2ei16_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -830,7 +830,7 @@ void test_vsoxseg2ei16_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -840,7 +840,7 @@ void test_vsoxseg2ei16_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -850,7 +850,7 @@ void test_vsoxseg2ei16_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint16m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -860,7 +860,7 @@ void test_vsoxseg2ei16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -870,7 +870,7 @@ void test_vsoxseg2ei16_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -880,7 +880,7 @@ void test_vsoxseg2ei16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -890,7 +890,7 @@ void test_vsoxseg2ei16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m4x2_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -900,7 +900,7 @@ void test_vsoxseg2ei16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint16m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -910,7 +910,7 @@ void test_vsoxseg2ei16_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -920,7 +920,7 @@ void test_vsoxseg2ei16_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, 
vuint32m2x2_t v_tuple, size_t vl) { @@ -930,7 +930,7 @@ void test_vsoxseg2ei16_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -940,7 +940,7 @@ void test_vsoxseg2ei16_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -950,7 +950,7 @@ void test_vsoxseg2ei16_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -960,7 +960,7 @@ void test_vsoxseg2ei16_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6)
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei16_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei32.c
index 366f83faf555c..e340289164a7d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei32.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf4x2
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_f16mf4x2(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
@@ -20,7 +20,7 @@ void test_vsoxseg2ei32_v_f16mf4x2(_Float16 *base, vuint32mf2_t bindex, vfloat16m
// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf2x2
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_f16mf2x2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
@@ -30,7 +30,7 @@ void test_vsoxseg2ei32_v_f16mf2x2(_Float16 *base, vuint32m1_t bindex, vfloat16mf
// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m1x2
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei32_v_f16m1x2(_Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
@@ -40,7 +40,7 @@ void test_vsoxseg2ei32_v_f16m1x2(_Float16 *base, vuint32m2_t bindex, vfloat16m1x
// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m2x2
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple",
, 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16m2x2(_Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg2ei32_v_f16m2x2(_Float16 *base, vuint32m4_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16m4x2(_Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg2ei32_v_f16m4x2(_Float16 *base, vuint32m8_t bindex, vfloat16m4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32mf2x2(float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg2ei32_v_f32mf2x2(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m1x2(float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg2ei32_v_f32m1x2(float *base, vuint32m1_t bindex, vfloat32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m2x2(float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg2ei32_v_f32m2x2(float *base, vuint32m2_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m4x2(float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg2ei32_v_f32m4x2(float *base, vuint32m4_t bindex, vfloat32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f64m1x2(double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg2ei32_v_f64m1x2(double *base, vuint32mf2_t bindex, vfloat64m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f64m2x2(double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg2ei32_v_f64m2x2(double *base, vuint32m1_t bindex, vfloat64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f64m4x2(double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg2ei32_v_f64m4x2(double *base, vuint32m2_t bindex, vfloat64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf8x2(int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg2ei32_v_i8mf8x2(int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf4x2(int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg2ei32_v_i8mf4x2(int8_t *base, vuint32m1_t bindex, vint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf2x2(int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg2ei32_v_i8mf2x2(int8_t *base, vuint32m2_t bindex, vint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8m1x2(int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg2ei32_v_i8m1x2(int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8m2x2(int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg2ei32_v_i8m2x2(int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16mf4x2(int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg2ei32_v_i16mf4x2(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16mf2x2(int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg2ei32_v_i16mf2x2(int16_t *base, vuint32m1_t bindex, vint16mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m1x2(int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg2ei32_v_i16m1x2(int16_t *base, vuint32m2_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m2x2(int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg2ei32_v_i16m2x2(int16_t *base, vuint32m4_t bindex, vint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m4x2(int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg2ei32_v_i16m4x2(int16_t *base, vuint32m8_t bindex, vint16m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32mf2x2(int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg2ei32_v_i32mf2x2(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg2ei32_v_i32m1x2(int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg2ei32_v_i32m1x2(int32_t *base, vuint32m1_t bindex, vint32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32m2x2(int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg2ei32_v_i32m2x2(int32_t *base, vuint32m2_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32m4x2(int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg2ei32_v_i32m4x2(int32_t *base, vuint32m4_t bindex, vint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m1x2(int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg2ei32_v_i64m1x2(int64_t *base, vuint32mf2_t bindex, vint64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m2x2(int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 
@@ void test_vsoxseg2ei32_v_i64m2x2(int64_t *base, vuint32m1_t bindex, vint64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m4x2(int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg2ei32_v_i64m4x2(int64_t *base, vuint32m2_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf8x2(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg2ei32_v_u8mf8x2(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf4x2(uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg2ei32_v_u8mf4x2(uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf2x2(uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg2ei32_v_u8mf2x2(uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_ // CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg2ei32_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8m1x2(uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg2ei32_v_u8m1x2(uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8m2x2(uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg2ei32_v_u8m2x2(uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16mf4x2(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg2ei32_v_u16mf4x2(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16mf2x2(uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg2ei32_v_u16mf2x2(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16m1x2(uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg2ei32_v_u16m1x2(uint16_t *base, vuint32m2_t bindex, vuint16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16m2x2(uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg2ei32_v_u16m2x2(uint16_t *base, vuint32m4_t bindex, vuint16m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16m4x2(uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg2ei32_v_u16m4x2(uint16_t *base, vuint32m8_t bindex, vuint16m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32mf2x2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg2ei32_v_u32mf2x2(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m1x2(uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg2ei32_v_u32m1x2(uint32_t *base, vuint32m1_t bindex, vuint32m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m2x2(uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg2ei32_v_u32m2x2(uint32_t *base, vuint32m2_t bindex, vuint32m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m4x2(uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg2ei32_v_u32m4x2(uint32_t *base, vuint32m4_t bindex, vuint32m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u64m1x2(uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg2ei32_v_u64m1x2(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u64m2x2(uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg2ei32_v_u64m2x2(uint64_t *base, vuint32m1_t bindex, vuint64m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u64m4x2(uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg2ei32_v_u64m4x2(uint64_t *base, vuint32m2_t bindex, vuint64m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg2ei32_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg2ei32_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg2ei32_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg2ei32_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg2ei32_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint32m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32mf2x2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg2ei32_v_f32mf2x2_m(vbool64_t mask, float *base, vuint32mf2_t bi // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m1x2_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg2ei32_v_f32m1x2_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m2x2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg2ei32_v_f32m2x2_m(vbool16_t mask, float *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m4x2_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg2ei32_v_f32m4x2_m(vbool8_t mask, float *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg2ei32_v_f64m1x2_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg2ei32_v_f64m1x2_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f64m2x2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg2ei32_v_f64m2x2_m(vbool32_t mask, double *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f64m4x2_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg2ei32_v_f64m4x2_m(vbool16_t mask, double *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg2ei32_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg2ei32_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg2ei32_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg2ei32_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg2ei32_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint32m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg2ei32_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg2ei32_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg2ei32_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg2ei32_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m4x2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg2ei32_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint32m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg2ei32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg2ei32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t 
vl) { @@ -720,7 +720,7 @@ void test_vsoxseg2ei32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg2ei32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg2ei32_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsoxseg2ei32_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsoxseg2ei32_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsoxseg2ei32_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsoxseg2ei32_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsoxseg2ei32_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsoxseg2ei32_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsoxseg2ei32_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsoxseg2ei32_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -830,7 +830,7 @@ void test_vsoxseg2ei32_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -840,7 +840,7 @@ void test_vsoxseg2ei32_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -850,7 +850,7 @@ void test_vsoxseg2ei32_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -860,7 +860,7 @@ void test_vsoxseg2ei32_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint32m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -870,7 +870,7 @@ void test_vsoxseg2ei32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -880,7 +880,7 @@ void test_vsoxseg2ei32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -890,7 +890,7 @@ void test_vsoxseg2ei32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -900,7 +900,7 @@ void test_vsoxseg2ei32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg2ei32_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -910,7 +910,7 @@ void test_vsoxseg2ei32_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -920,7 +920,7 @@ void test_vsoxseg2ei32_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei64.c index f0f07a1c99410..e89048b1a2f90 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16mf4x2(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg2ei64_v_f16mf4x2(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16mf2x2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg2ei64_v_f16mf2x2(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16m1x2(_Float16 *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg2ei64_v_f16m1x2(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16m2x2(_Float16 *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg2ei64_v_f16m2x2(_Float16 *base, vuint64m8_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32mf2x2(float *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg2ei64_v_f32mf2x2(float *base, vuint64m1_t bindex, vfloat32mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m1x2(float *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg2ei64_v_f32m1x2(float *base, vuint64m2_t bindex, vfloat32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m2x2(float *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg2ei64_v_f32m2x2(float *base, vuint64m4_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m4x2(float *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg2ei64_v_f32m4x2(float *base, vuint64m8_t bindex, vfloat32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m1x2(double *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg2ei64_v_f64m1x2(double *base, vuint64m1_t bindex, vfloat64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m2x2(double *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg2ei64_v_f64m2x2(double *base, vuint64m2_t bindex, vfloat64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m4x2(double *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg2ei64_v_f64m4x2(double *base, vuint64m4_t bindex, vfloat64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf8x2(int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg2ei64_v_i8mf8x2(int8_t *base, vuint64m1_t bindex, vint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf4x2(int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg2ei64_v_i8mf4x2(int8_t *base, vuint64m2_t bindex, vint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf2x2(int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg2ei64_v_i8mf2x2(int8_t *base, vuint64m4_t bindex, vint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8m1x2(int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg2ei64_v_i8m1x2(int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16mf4x2(int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg2ei64_v_i16mf4x2(int16_t *base, vuint64m1_t bindex, vint16mf4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16mf2x2(int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg2ei64_v_i16mf2x2(int16_t *base, vuint64m2_t bindex, vint16mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16m1x2(int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg2ei64_v_i16m1x2(int16_t *base, vuint64m4_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16m2x2(int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg2ei64_v_i16m2x2(int16_t *base, vuint64m8_t bindex, vint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32mf2x2(int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg2ei64_v_i32mf2x2(int32_t *base, vuint64m1_t bindex, vint32mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32m1x2(int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg2ei64_v_i32m1x2(int32_t *base, vuint64m2_t bindex, vint32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32m2x2(int32_t 
*base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg2ei64_v_i32m2x2(int32_t *base, vuint64m4_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32m4x2(int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg2ei64_v_i32m4x2(int32_t *base, vuint64m8_t bindex, vint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m1x2(int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg2ei64_v_i64m1x2(int64_t *base, vuint64m1_t bindex, vint64m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m2x2(int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg2ei64_v_i64m2x2(int64_t *base, vuint64m2_t bindex, vint64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m4x2(int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void 
test_vsoxseg2ei64_v_i64m4x2(int64_t *base, vuint64m4_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf8x2(uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg2ei64_v_u8mf8x2(uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf4x2(uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg2ei64_v_u8mf4x2(uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf2x2(uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg2ei64_v_u8mf2x2(uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8m1x2(uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg2ei64_v_u8m1x2(uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg2ei64_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16mf4x2(uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg2ei64_v_u16mf4x2(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16mf2x2(uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg2ei64_v_u16mf2x2(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16m1x2(uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg2ei64_v_u16m1x2(uint16_t *base, vuint64m4_t bindex, vuint16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16m2x2(uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg2ei64_v_u16m2x2(uint16_t *base, vuint64m8_t bindex, vuint16m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32mf2x2(uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg2ei64_v_u32mf2x2(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32m1x2(uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg2ei64_v_u32m1x2(uint32_t *base, vuint64m2_t bindex, vuint32m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32m2x2(uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg2ei64_v_u32m2x2(uint32_t *base, vuint64m4_t bindex, vuint32m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32m4x2(uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg2ei64_v_u32m4x2(uint32_t *base, vuint64m8_t bindex, vuint32m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u64m1x2(uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg2ei64_v_u64m1x2(uint64_t *base, vuint64m1_t bindex, vuint64m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u64m2x2(uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg2ei64_v_u64m2x2(uint64_t *base, vuint64m2_t bindex, vuint64m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u64m4x2(uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg2ei64_v_u64m4x2(uint64_t *base, vuint64m4_t bindex, vuint64m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg2ei64_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg2ei64_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg2ei64_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg2ei64_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32mf2x2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg2ei64_v_f32mf2x2_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg2ei64_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m1x2_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg2ei64_v_f32m1x2_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m2x2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg2ei64_v_f32m2x2_m(vbool16_t mask, float *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m4x2_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg2ei64_v_f32m4x2_m(vbool8_t mask, float *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m1x2_m(vbool64_t mask, double *base, 
vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg2ei64_v_f64m1x2_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m2x2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg2ei64_v_f64m2x2_m(vbool32_t mask, double *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m4x2_m(vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg2ei64_v_f64m4x2_m(vbool16_t mask, double *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg2ei64_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg2ei64_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg2ei64_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg2ei64_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg2ei64_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg2ei64_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg2ei64_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg2ei64_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg2ei64_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m1x2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg2ei64_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg2ei64_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg2ei64_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) { 
@@ -660,7 +660,7 @@ void test_vsoxseg2ei64_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg2ei64_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg2ei64_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg2ei64_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg2ei64_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg2ei64_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg2ei64_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg2ei64_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg2ei64_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsoxseg2ei64_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsoxseg2ei64_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsoxseg2ei64_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsoxseg2ei64_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsoxseg2ei64_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsoxseg2ei64_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsoxseg2ei64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint64m1_t b // CHECK-RV64-LABEL: 
define dso_local void @test_vsoxseg2ei64_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsoxseg2ei64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei8.c index 5dbbc384179c8..28f42913287d2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16mf4x2(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg2ei8_v_f16mf4x2(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16mf2x2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg2ei8_v_f16mf2x2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m1x2(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg2ei8_v_f16m1x2(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m2x2(_Float16 *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg2ei8_v_f16m2x2(_Float16 *base, vuint8m1_t bindex, vfloat16m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m4x2(_Float16 *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg2ei8_v_f16m4x2(_Float16 *base, vuint8m2_t bindex, vfloat16m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32mf2x2(float *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg2ei8_v_f32mf2x2(float *base, vuint8mf8_t bindex, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32m1x2(float *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg2ei8_v_f32m1x2(float *base, vuint8mf4_t bindex, vfloat32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32m2x2(float *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg2ei8_v_f32m2x2(float *base, vuint8mf2_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32m4x2(float *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg2ei8_v_f32m4x2(float *base, vuint8m1_t bindex, vfloat32m4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m1x2(double *base, vuint8mf8_t bindex, 
vfloat64m1x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg2ei8_v_f64m1x2(double *base, vuint8mf8_t bindex, vfloat64m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m2x2(double *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg2ei8_v_f64m2x2(double *base, vuint8mf4_t bindex, vfloat64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m4x2(double *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg2ei8_v_f64m4x2(double *base, vuint8mf2_t bindex, vfloat64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf8x2(int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg2ei8_v_i8mf8x2(int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf4x2(int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg2ei8_v_i8mf4x2(int8_t *base, vuint8mf4_t bindex, 
vint8mf4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf2x2(int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg2ei8_v_i8mf2x2(int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8m1x2(int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg2ei8_v_i8m1x2(int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8m2x2(int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg2ei8_v_i8m2x2(int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8m4x2(int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg2ei8_v_i8m4x2(int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16mf4x2(int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg2ei8_v_i16mf4x2(int16_t *base, vuint8mf8_t bindex, vint16mf4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16mf2x2(int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg2ei8_v_i16mf2x2(int16_t *base, vuint8mf4_t bindex, vint16mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16m1x2(int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg2ei8_v_i16m1x2(int16_t *base, vuint8mf2_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16m2x2(int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg2ei8_v_i16m2x2(int16_t *base, vuint8m1_t bindex, vint16m2x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16m4x2(int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg2ei8_v_i16m4x2(int16_t *base, vuint8m2_t bindex, vint16m4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32mf2x2(int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg2ei8_v_i32mf2x2(int32_t *base, vuint8mf8_t bindex, vint32mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m1x2(int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg2ei8_v_i32m1x2(int32_t *base, vuint8mf4_t bindex, vint32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m2x2(int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg2ei8_v_i32m2x2(int32_t *base, vuint8mf2_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m4x2(int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg2ei8_v_i32m4x2(int32_t *base, vuint8m1_t bindex, vint32m4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m1x2(int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg2ei8_v_i64m1x2(int64_t *base, vuint8mf8_t bindex, vint64m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m2x2(int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg2ei8_v_i64m2x2(int64_t *base, vuint8mf4_t bindex, vint64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m4x2(int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg2ei8_v_i64m4x2(int64_t *base, vuint8mf2_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf8x2(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg2ei8_v_u8mf8x2(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf4x2(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg2ei8_v_u8mf4x2(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf2x2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg2ei8_v_u8mf2x2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m1x2(uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg2ei8_v_u8m1x2(uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m2x2(uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg2ei8_v_u8m2x2(uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m4x2(uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg2ei8_v_u8m4x2(uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16mf4x2(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg2ei8_v_u16mf4x2(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16mf2x2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg2ei8_v_u16mf2x2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m1x2(uint16_t *base, vuint8mf2_t bindex, 
vuint16m1x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg2ei8_v_u16m1x2(uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m2x2(uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg2ei8_v_u16m2x2(uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m4x2(uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg2ei8_v_u16m4x2(uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32mf2x2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg2ei8_v_u32mf2x2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m1x2(uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg2ei8_v_u32m1x2(uint32_t *base, vuint8mf4_t bindex, 
vuint32m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m2x2(uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg2ei8_v_u32m2x2(uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m4x2(uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg2ei8_v_u32m4x2(uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u64m1x2(uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg2ei8_v_u64m1x2(uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u64m2x2(uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg2ei8_v_u64m2x2(uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u64m4x2(uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg2ei8_v_u64m4x2(uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg2ei8_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg2ei8_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg2ei8_v_f16m1x2_m(vbool16_t mask, _Float16 *base, 
vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg2ei8_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg2ei8_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint8m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32mf2x2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg2ei8_v_f32mf2x2_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg2ei8_v_f32m1x2_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg2ei8_v_f32m1x2_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32m2x2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg2ei8_v_f32m2x2_m(vbool16_t mask, float *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32m4x2_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg2ei8_v_f32m4x2_m(vbool8_t mask, float *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m1x2_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg2ei8_v_f64m1x2_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m2x2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg2ei8_v_f64m2x2_m(vbool32_t mask, double *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m4x2_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg2ei8_v_f64m4x2_m(vbool16_t mask, double *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg2ei8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg2ei8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg2ei8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg2ei8_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg2ei8_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg2ei8_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg2ei8_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg2ei8_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg2ei8_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void 
test_vsoxseg2ei8_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg2ei8_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint8m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg2ei8_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg2ei8_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsoxseg2ei8_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsoxseg2ei8_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsoxseg2ei8_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsoxseg2ei8_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsoxseg2ei8_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsoxseg2ei8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsoxseg2ei8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsoxseg2ei8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -830,7 +830,7 @@ void test_vsoxseg2ei8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -840,7 +840,7 @@ void test_vsoxseg2ei8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -850,7 +850,7 @@ void test_vsoxseg2ei8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -860,7 +860,7 @@ void test_vsoxseg2ei8_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -870,7 +870,7 @@ void test_vsoxseg2ei8_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -880,7 +880,7 @@ void test_vsoxseg2ei8_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -890,7 +890,7 @@ void test_vsoxseg2ei8_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -900,7 +900,7 @@ 
void test_vsoxseg2ei8_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint8m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -910,7 +910,7 @@ void test_vsoxseg2ei8_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -920,7 +920,7 @@ void test_vsoxseg2ei8_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -930,7 +930,7 @@ void test_vsoxseg2ei8_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -940,7 +940,7 @@ void test_vsoxseg2ei8_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -950,7 +950,7 @@ void test_vsoxseg2ei8_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -960,7 +960,7 @@ void test_vsoxseg2ei8_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei16.c index 4aed99b8d3b86..785016338859f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16mf4x3(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg3ei16_v_f16mf4x3(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16mf2x3(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg3ei16_v_f16mf2x3(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16m1x3(_Float16 *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg3ei16_v_f16m1x3(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16m2x3(_Float16 *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg3ei16_v_f16m2x3(_Float16 *base, vuint16m2_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32mf2x3(float *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg3ei16_v_f32mf2x3(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32m1x3(float *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg3ei16_v_f32m1x3(float *base, vuint16mf2_t bindex, vfloat32m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32m2x3(float *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg3ei16_v_f32m2x3(float *base, vuint16m1_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f64m1x3(double *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg3ei16_v_f64m1x3(double *base, vuint16mf4_t bindex, vfloat64m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f64m2x3(double *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg3ei16_v_f64m2x3(double *base, vuint16mf2_t bindex, vfloat64m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8mf8x3(int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg3ei16_v_i8mf8x3(int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8mf4x3(int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg3ei16_v_i8mf4x3(int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8mf2x3(int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg3ei16_v_i8mf2x3(int8_t *base, vuint16m1_t bindex, vint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8m1x3(int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg3ei16_v_i8m1x3(int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8m2x3(int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg3ei16_v_i8m2x3(int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16mf4x3(int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg3ei16_v_i16mf4x3(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16mf2x3(int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg3ei16_v_i16mf2x3(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16m1x3(int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg3ei16_v_i16m1x3(int16_t *base, vuint16m1_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16m2x3(int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg3ei16_v_i16m2x3(int16_t *base, vuint16m2_t bindex, vint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32mf2x3(int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg3ei16_v_i32mf2x3(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32m1x3(int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg3ei16_v_i32m1x3(int32_t *base, vuint16mf2_t bindex, vint32m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32m2x3(int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg3ei16_v_i32m2x3(int32_t *base, vuint16m1_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i64m1x3(int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg3ei16_v_i64m1x3(int64_t *base, vuint16mf4_t bindex, vint64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i64m2x3(int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg3ei16_v_i64m2x3(int64_t *base, vuint16mf2_t bindex, vint64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8mf8x3(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg3ei16_v_u8mf8x3(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8mf4x3(uint8_t 
*base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg3ei16_v_u8mf4x3(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8mf2x3(uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg3ei16_v_u8mf2x3(uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8m1x3(uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg3ei16_v_u8m1x3(uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8m2x3(uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg3ei16_v_u8m2x3(uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16mf4x3(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void 
test_vsoxseg3ei16_v_u16mf4x3(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16mf2x3(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg3ei16_v_u16mf2x3(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16m1x3(uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg3ei16_v_u16m1x3(uint16_t *base, vuint16m1_t bindex, vuint16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16m2x3(uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg3ei16_v_u16m2x3(uint16_t *base, vuint16m2_t bindex, vuint16m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u32mf2x3(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg3ei16_v_u32mf2x3(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg3ei16_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u32m1x3(uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg3ei16_v_u32m1x3(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u32m2x3(uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg3ei16_v_u32m2x3(uint32_t *base, vuint16m1_t bindex, vuint32m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u64m1x3(uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg3ei16_v_u64m1x3(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u64m2x3(uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg3ei16_v_u64m2x3(uint64_t *base, vuint16mf2_t bindex, vuint64m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg3ei16_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg3ei16_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg3ei16_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -420,7 
+420,7 @@ void test_vsoxseg3ei16_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32mf2x3_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg3ei16_v_f32mf2x3_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32m1x3_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg3ei16_v_f32m1x3_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32m2x3_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg3ei16_v_f32m2x3_m(vbool16_t mask, float *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f64m1x3_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg3ei16_v_f64m1x3_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f64m2x3_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg3ei16_v_f64m2x3_m(vbool32_t mask, double *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg3ei16_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg3ei16_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg3ei16_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg3ei16_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg3ei16_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint16m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg3ei16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg3ei16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg3ei16_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg3ei16_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg3ei16_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local 
void @test_vsoxseg3ei16_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg3ei16_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg3ei16_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg3ei16_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i64m2x3_m(vbool32_t mask, int64_t 
*base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg3ei16_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg3ei16_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg3ei16_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg3ei16_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg3ei16_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg3ei16_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg3ei16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg3ei16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg3ei16_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg3ei16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg3ei16_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg3ei16_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m2x3_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg3ei16_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg3ei16_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei32.c index 51c98fc77b7e1..f55b084595130 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16mf4x3(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg3ei32_v_f16mf4x3(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16mf2x3(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg3ei32_v_f16mf2x3(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16m1x3(_Float16 *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg3ei32_v_f16m1x3(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16m2x3(_Float16 *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg3ei32_v_f16m2x3(_Float16 *base, vuint32m4_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f32mf2x3(float *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg3ei32_v_f32mf2x3(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f32m1x3(float *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg3ei32_v_f32m1x3(float *base, vuint32m1_t bindex, vfloat32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f32m2x3(float *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg3ei32_v_f32m2x3(float *base, vuint32m2_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f64m1x3(double *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg3ei32_v_f64m1x3(double *base, vuint32mf2_t bindex, vfloat64m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f64m2x3(double *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg3ei32_v_f64m2x3(double *base, vuint32m1_t bindex, vfloat64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf8x3(int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg3ei32_v_i8mf8x3(int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf4x3(int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg3ei32_v_i8mf4x3(int8_t *base, vuint32m1_t bindex, vint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf2x3(int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg3ei32_v_i8mf2x3(int8_t *base, vuint32m2_t bindex, vint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8m1x3(int8_t *base, 
vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg3ei32_v_i8m1x3(int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8m2x3(int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg3ei32_v_i8m2x3(int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16mf4x3(int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg3ei32_v_i16mf4x3(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16mf2x3(int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg3ei32_v_i16mf2x3(int16_t *base, vuint32m1_t bindex, vint16mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16m1x3(int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg3ei32_v_i16m1x3(int16_t 
*base, vuint32m2_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16m2x3(int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg3ei32_v_i16m2x3(int16_t *base, vuint32m4_t bindex, vint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32mf2x3(int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg3ei32_v_i32mf2x3(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32m1x3(int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg3ei32_v_i32m1x3(int32_t *base, vuint32m1_t bindex, vint32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32m2x3(int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg3ei32_v_i32m2x3(int32_t *base, vuint32m2_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m1x3 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i64m1x3(int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg3ei32_v_i64m1x3(int64_t *base, vuint32mf2_t bindex, vint64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i64m2x3(int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg3ei32_v_i64m2x3(int64_t *base, vuint32m1_t bindex, vint64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf8x3(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg3ei32_v_u8mf8x3(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf4x3(uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg3ei32_v_u8mf4x3(uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf2x3(uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg3ei32_v_u8mf2x3(uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8m1x3(uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg3ei32_v_u8m1x3(uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8m2x3(uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg3ei32_v_u8m2x3(uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16mf4x3(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg3ei32_v_u16mf4x3(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16mf2x3(uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg3ei32_v_u16mf2x3(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16m1x3(uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg3ei32_v_u16m1x3(uint16_t *base, vuint32m2_t bindex, vuint16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16m2x3(uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg3ei32_v_u16m2x3(uint16_t *base, vuint32m4_t bindex, vuint16m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32mf2x3(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg3ei32_v_u32mf2x3(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32m1x3(uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg3ei32_v_u32m1x3(uint32_t *base, vuint32m1_t bindex, vuint32m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32m2x3(uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg3ei32_v_u32m2x3(uint32_t *base, vuint32m2_t bindex, vuint32m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u64m1x3(uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg3ei32_v_u64m1x3(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u64m2x3(uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg3ei32_v_u64m2x3(uint64_t *base, vuint32m1_t bindex, vuint64m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg3ei32_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg3ei32_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg3ei32_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg3ei32_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f32mf2x3_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg3ei32_v_f32mf2x3_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f32m1x3_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg3ei32_v_f32m1x3_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f32m2x3_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg3ei32_v_f32m2x3_m(vbool16_t mask, float *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f64m1x3_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg3ei32_v_f64m1x3_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg3ei32_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f64m2x3_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg3ei32_v_f64m2x3_m(vbool32_t mask, double *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg3ei32_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg3ei32_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf2x3_m(vbool16_t mask, int8_t *base, 
vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg3ei32_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg3ei32_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg3ei32_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint32m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg3ei32_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg3ei32_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg3ei32_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg3ei32_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg3ei32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg3ei32_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg3ei32_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg3ei32_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg3ei32_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf8x3_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg3ei32_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg3ei32_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg3ei32_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ 
-650,7 +650,7 @@ void test_vsoxseg3ei32_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg3ei32_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg3ei32_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg3ei32_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg3ei32_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg3ei32_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg3ei32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg3ei32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg3ei32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg3ei32_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei64.c index d636899c2a18c..cf1f4214c1788 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16mf4x3(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void 
test_vsoxseg3ei64_v_f16mf4x3(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16mf2x3(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg3ei64_v_f16mf2x3(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16m1x3(_Float16 *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg3ei64_v_f16m1x3(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16m2x3(_Float16 *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg3ei64_v_f16m2x3(_Float16 *base, vuint64m8_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f32mf2x3(float *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg3ei64_v_f32mf2x3(float *base, vuint64m1_t bindex, vfloat32mf2x3 // CHECK-RV64-LABEL: define dso_local 
void @test_vsoxseg3ei64_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f32m1x3(float *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg3ei64_v_f32m1x3(float *base, vuint64m2_t bindex, vfloat32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f32m2x3(float *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg3ei64_v_f32m2x3(float *base, vuint64m4_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f64m1x3(double *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg3ei64_v_f64m1x3(double *base, vuint64m1_t bindex, vfloat64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f64m2x3(double *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg3ei64_v_f64m2x3(double *base, vuint64m2_t bindex, vfloat64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf8x3(int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg3ei64_v_i8mf8x3(int8_t *base, vuint64m1_t bindex, vint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf4x3(int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg3ei64_v_i8mf4x3(int8_t *base, vuint64m2_t bindex, vint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf2x3(int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg3ei64_v_i8mf2x3(int8_t *base, vuint64m4_t bindex, vint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8m1x3(int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg3ei64_v_i8m1x3(int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16mf4x3(int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg3ei64_v_i16mf4x3(int16_t *base, vuint64m1_t bindex, vint16mf4x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16mf2x3(int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg3ei64_v_i16mf2x3(int16_t *base, vuint64m2_t bindex, vint16mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16m1x3(int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg3ei64_v_i16m1x3(int16_t *base, vuint64m4_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16m2x3(int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg3ei64_v_i16m2x3(int16_t *base, vuint64m8_t bindex, vint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32mf2x3(int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg3ei64_v_i32mf2x3(int32_t *base, vuint64m1_t bindex, vint32mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32m1x3(int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg3ei64_v_i32m1x3(int32_t *base, vuint64m2_t bindex, vint32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32m2x3(int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg3ei64_v_i32m2x3(int32_t *base, vuint64m4_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i64m1x3(int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg3ei64_v_i64m1x3(int64_t *base, vuint64m1_t bindex, vint64m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i64m2x3(int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg3ei64_v_i64m2x3(int64_t *base, vuint64m2_t bindex, vint64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf8x3(uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg3ei64_v_u8mf8x3(uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf4x3(uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg3ei64_v_u8mf4x3(uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf2x3(uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg3ei64_v_u8mf2x3(uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8m1x3(uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg3ei64_v_u8m1x3(uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16mf4x3(uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg3ei64_v_u16mf4x3(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16mf2x3(uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg3ei64_v_u16mf2x3(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16m1x3(uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg3ei64_v_u16m1x3(uint16_t *base, vuint64m4_t bindex, vuint16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16m2x3(uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg3ei64_v_u16m2x3(uint16_t *base, vuint64m8_t bindex, vuint16m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u32mf2x3(uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg3ei64_v_u32mf2x3(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u32m1x3(uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg3ei64_v_u32m1x3(uint32_t *base, vuint64m2_t bindex, vuint32m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u32m2x3(uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg3ei64_v_u32m2x3(uint32_t *base, vuint64m4_t bindex, vuint32m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg3ei64_v_u64m1x3(uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg3ei64_v_u64m1x3(uint64_t *base, vuint64m1_t bindex, vuint64m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u64m2x3(uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg3ei64_v_u64m2x3(uint64_t *base, vuint64m2_t bindex, vuint64m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg3ei64_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg3ei64_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg3ei64_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg3ei64_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f32mf2x3_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg3ei64_v_f32mf2x3_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f32m1x3_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg3ei64_v_f32m1x3_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f32m2x3_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg3ei64_v_f32m2x3_m(vbool16_t mask, float *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f64m1x3_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg3ei64_v_f64m1x3_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f64m2x3_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg3ei64_v_f64m2x3_m(vbool32_t mask, double *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg3ei64_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf4x3_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg3ei64_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg3ei64_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg3ei64_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ 
-500,7 +500,7 @@ void test_vsoxseg3ei64_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg3ei64_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg3ei64_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg3ei64_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg3ei64_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg3ei64_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg3ei64_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg3ei64_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg3ei64_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg3ei64_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg3ei64_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg3ei64_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg3ei64_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg3ei64_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg3ei64_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg3ei64_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg3ei64_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg3ei64_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg3ei64_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg3ei64_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u32m2x3_m(vbool16_t 
mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg3ei64_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg3ei64_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei8.c index 151a4f1aa98d8..e2c29e6a98a5a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16mf4x3(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg3ei8_v_f16mf4x3(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16mf2x3(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg3ei8_v_f16mf2x3(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16m1x3(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg3ei8_v_f16m1x3(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16m2x3(_Float16 *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg3ei8_v_f16m2x3(_Float16 *base, vuint8m1_t bindex, vfloat16m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32mf2x3(float *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg3ei8_v_f32mf2x3(float *base, vuint8mf8_t bindex, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32m1x3(float *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg3ei8_v_f32m1x3(float *base, vuint8mf4_t bindex, vfloat32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32m2x3(float *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg3ei8_v_f32m2x3(float *base, vuint8mf2_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f64m1x3(double *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg3ei8_v_f64m1x3(double *base, vuint8mf8_t bindex, vfloat64m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f64m2x3(double *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg3ei8_v_f64m2x3(double *base, vuint8mf4_t bindex, vfloat64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf8x3(int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg3ei8_v_i8mf8x3(int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf4x3(int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg3ei8_v_i8mf4x3(int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf2x3(int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg3ei8_v_i8mf2x3(int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8m1x3(int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg3ei8_v_i8m1x3(int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8m2x3(int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg3ei8_v_i8m2x3(int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16mf4x3(int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg3ei8_v_i16mf4x3(int16_t *base, vuint8mf8_t bindex, vint16mf4x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16mf2x3(int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg3ei8_v_i16mf2x3(int16_t *base, vuint8mf4_t bindex, vint16mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16m1x3(int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg3ei8_v_i16m1x3(int16_t *base, vuint8mf2_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16m2x3(int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, 
size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg3ei8_v_i16m2x3(int16_t *base, vuint8m1_t bindex, vint16m2x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32mf2x3(int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg3ei8_v_i32mf2x3(int32_t *base, vuint8mf8_t bindex, vint32mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32m1x3(int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg3ei8_v_i32m1x3(int32_t *base, vuint8mf4_t bindex, vint32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32m2x3(int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg3ei8_v_i32m2x3(int32_t *base, vuint8mf2_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i64m1x3(int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg3ei8_v_i64m1x3(int64_t *base, vuint8mf8_t bindex, vint64m1x3_t // CHECK-RV64-LABEL: 
define dso_local void @test_vsoxseg3ei8_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i64m2x3(int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg3ei8_v_i64m2x3(int64_t *base, vuint8mf4_t bindex, vint64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf8x3(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg3ei8_v_u8mf8x3(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf4x3(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg3ei8_v_u8mf4x3(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf2x3(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg3ei8_v_u8mf2x3(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8m1x3(uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg3ei8_v_u8m1x3(uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8m2x3(uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg3ei8_v_u8m2x3(uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16mf4x3(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg3ei8_v_u16mf4x3(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16mf2x3(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg3ei8_v_u16mf2x3(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16m1x3(uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg3ei8_v_u16m1x3(uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16m2x3(uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg3ei8_v_u16m2x3(uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u32mf2x3(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg3ei8_v_u32mf2x3(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u32m1x3(uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg3ei8_v_u32m1x3(uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u32m2x3(uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg3ei8_v_u32m2x3(uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u64m1x3(uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg3ei8_v_u64m1x3(uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u64m2x3(uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg3ei8_v_u64m2x3(uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg3ei8_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg3ei8_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg3ei8_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg3ei8_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32mf2x3_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg3ei8_v_f32mf2x3_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32m1x3_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg3ei8_v_f32m1x3_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32m2x3_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg3ei8_v_f32m2x3_m(vbool16_t mask, float *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f64m1x3_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg3ei8_v_f64m1x3_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f64m2x3_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg3ei8_v_f64m2x3_m(vbool32_t mask, double *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg3ei8_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg3ei8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg3ei8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg3ei8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, 
vint8m1x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg3ei8_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg3ei8_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg3ei8_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg3ei8_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg3ei8_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg3ei8_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg3ei8_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg3ei8_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg3ei8_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg3ei8_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg3ei8_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg3ei8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg3ei8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg3ei8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg3ei8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg3ei8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg3ei8_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg3ei8_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg3ei8_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg3ei8_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16m2x3_m(vbool8_t mask, uint16_t *base, 
vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg3ei8_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg3ei8_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg3ei8_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg3ei8_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg3ei8_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei16.c index 4166a34282d7d..9467938a037f5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16mf4x4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg4ei16_v_f16mf4x4(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16mf2x4(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg4ei16_v_f16mf2x4(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m1x4 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16m1x4(_Float16 *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg4ei16_v_f16m1x4(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16m2x4(_Float16 *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg4ei16_v_f16m2x4(_Float16 *base, vuint16m2_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f32mf2x4(float *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg4ei16_v_f32mf2x4(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f32m1x4(float *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg4ei16_v_f32m1x4(float *base, vuint16mf2_t bindex, vfloat32m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f32m2x4(float *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg4ei16_v_f32m2x4(float *base, vuint16m1_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f64m1x4(double *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg4ei16_v_f64m1x4(double *base, vuint16mf4_t bindex, vfloat64m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f64m2x4(double *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg4ei16_v_f64m2x4(double *base, vuint16mf2_t bindex, vfloat64m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf8x4(int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg4ei16_v_i8mf8x4(int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf4x4(int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg4ei16_v_i8mf4x4(int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf2x4(int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg4ei16_v_i8mf2x4(int8_t *base, vuint16m1_t bindex, vint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8m1x4(int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg4ei16_v_i8m1x4(int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8m2x4(int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg4ei16_v_i8m2x4(int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16mf4x4(int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg4ei16_v_i16mf4x4(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16mf2x4(int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg4ei16_v_i16mf2x4(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16m1x4(int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg4ei16_v_i16m1x4(int16_t *base, vuint16m1_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16m2x4(int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg4ei16_v_i16m2x4(int16_t *base, vuint16m2_t bindex, vint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32mf2x4(int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg4ei16_v_i32mf2x4(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32m1x4(int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg4ei16_v_i32m1x4(int32_t *base, vuint16mf2_t bindex, vint32m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32m2x4(int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg4ei16_v_i32m2x4(int32_t *base, vuint16m1_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i64m1x4(int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg4ei16_v_i64m1x4(int64_t *base, vuint16mf4_t bindex, vint64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i64m2x4(int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg4ei16_v_i64m2x4(int64_t *base, vuint16mf2_t bindex, vint64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf8x4(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg4ei16_v_u8mf8x4(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf4x4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg4ei16_v_u8mf4x4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf2x4(uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg4ei16_v_u8mf2x4(uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8m1x4(uint8_t 
*base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg4ei16_v_u8m1x4(uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8m2x4(uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg4ei16_v_u8m2x4(uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16mf4x4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg4ei16_v_u16mf4x4(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16mf2x4(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg4ei16_v_u16mf2x4(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16m1x4(uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void 
test_vsoxseg4ei16_v_u16m1x4(uint16_t *base, vuint16m1_t bindex, vuint16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16m2x4(uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg4ei16_v_u16m2x4(uint16_t *base, vuint16m2_t bindex, vuint16m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32mf2x4(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg4ei16_v_u32mf2x4(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32m1x4(uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg4ei16_v_u32m1x4(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32m2x4(uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg4ei16_v_u32m2x4(uint32_t *base, vuint16m1_t bindex, vuint32m2x4 // CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg4ei16_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u64m1x4(uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg4ei16_v_u64m1x4(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u64m2x4(uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg4ei16_v_u64m2x4(uint64_t *base, vuint16mf2_t bindex, vuint64m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg4ei16_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg4ei16_v_f16mf2x4_m(vbool32_t mask, 
_Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg4ei16_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg4ei16_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f32mf2x4_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg4ei16_v_f32mf2x4_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: 
ret void // void test_vsoxseg4ei16_v_f32m1x4_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg4ei16_v_f32m1x4_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f32m2x4_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg4ei16_v_f32m2x4_m(vbool16_t mask, float *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f64m1x4_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg4ei16_v_f64m1x4_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f64m2x4_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg4ei16_v_f64m2x4_m(vbool32_t mask, double *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg4ei16_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg4ei16_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg4ei16_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg4ei16_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg4ei16_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint16m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg4ei16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg4ei16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg4ei16_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m2x4_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg4ei16_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg4ei16_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg4ei16_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t 
vl) { @@ -590,7 +590,7 @@ void test_vsoxseg4ei16_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg4ei16_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg4ei16_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg4ei16_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg4ei16_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg4ei16_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg4ei16_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg4ei16_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg4ei16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg4ei16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg4ei16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg4ei16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg4ei16_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg4ei16_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg4ei16_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg4ei16_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint16mf4_t 
// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei32.c index 22567e546c516..8bbb9f86b0790 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16mf4x4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg4ei32_v_f16mf4x4(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16mf2x4(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg4ei32_v_f16mf2x4(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16m1x4(_Float16 *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg4ei32_v_f16m1x4(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16m2x4(_Float16 *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg4ei32_v_f16m2x4(_Float16 *base, vuint32m4_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f32mf2x4(float *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg4ei32_v_f32mf2x4(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f32m1x4(float *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg4ei32_v_f32m1x4(float *base, vuint32m1_t bindex, vfloat32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f32m2x4(float *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg4ei32_v_f32m2x4(float *base, vuint32m2_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f64m1x4(double *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg4ei32_v_f64m1x4(double *base, vuint32mf2_t bindex, vfloat64m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f64m2x4(double *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg4ei32_v_f64m2x4(double *base, vuint32m1_t bindex, vfloat64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf8x4(int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg4ei32_v_i8mf8x4(int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf4x4(int8_t *base, 
vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg4ei32_v_i8mf4x4(int8_t *base, vuint32m1_t bindex, vint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf2x4(int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg4ei32_v_i8mf2x4(int8_t *base, vuint32m2_t bindex, vint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8m1x4(int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg4ei32_v_i8m1x4(int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8m2x4(int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg4ei32_v_i8m2x4(int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16mf4x4(int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg4ei32_v_i16mf4x4(int16_t *base, 
vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16mf2x4(int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg4ei32_v_i16mf2x4(int16_t *base, vuint32m1_t bindex, vint16mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16m1x4(int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg4ei32_v_i16m1x4(int16_t *base, vuint32m2_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16m2x4(int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg4ei32_v_i16m2x4(int16_t *base, vuint32m4_t bindex, vint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32mf2x4(int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg4ei32_v_i32mf2x4(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m1x4 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32m1x4(int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg4ei32_v_i32m1x4(int32_t *base, vuint32m1_t bindex, vint32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32m2x4(int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg4ei32_v_i32m2x4(int32_t *base, vuint32m2_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i64m1x4(int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg4ei32_v_i64m1x4(int64_t *base, vuint32mf2_t bindex, vint64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i64m2x4(int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg4ei32_v_i64m2x4(int64_t *base, vuint32m1_t bindex, vint64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf8x4(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg4ei32_v_u8mf8x4(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf4x4(uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg4ei32_v_u8mf4x4(uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf2x4(uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg4ei32_v_u8mf2x4(uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8m1x4(uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg4ei32_v_u8m1x4(uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8m2x4(uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg4ei32_v_u8m2x4(uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16mf4x4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg4ei32_v_u16mf4x4(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16mf2x4(uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg4ei32_v_u16mf2x4(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16m1x4(uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg4ei32_v_u16m1x4(uint16_t *base, vuint32m2_t bindex, vuint16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16m2x4(uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg4ei32_v_u16m2x4(uint16_t *base, vuint32m4_t bindex, vuint16m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32mf2x4(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg4ei32_v_u32mf2x4(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32m1x4(uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg4ei32_v_u32m1x4(uint32_t *base, vuint32m1_t bindex, vuint32m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32m2x4(uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg4ei32_v_u32m2x4(uint32_t *base, vuint32m2_t bindex, vuint32m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u64m1x4(uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg4ei32_v_u64m1x4(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u64m2x4(uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg4ei32_v_u64m2x4(uint64_t *base, vuint32m1_t bindex, vuint64m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg4ei32_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg4ei32_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg4ei32_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg4ei32_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f32mf2x4_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg4ei32_v_f32mf2x4_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f32m1x4_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg4ei32_v_f32m1x4_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f32m2x4_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg4ei32_v_f32m2x4_m(vbool16_t mask, float *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f64m1x4_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg4ei32_v_f64m1x4_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f64m2x4_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg4ei32_v_f64m2x4_m(vbool32_t mask, double *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg4ei32_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg4ei32_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg4ei32_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg4ei32_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg4ei32_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg4ei32_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint32m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg4ei32_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg4ei32_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg4ei32_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg4ei32_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg4ei32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg4ei32_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg4ei32_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg4ei32_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg4ei32_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg4ei32_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg4ei32_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf2x4_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg4ei32_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg4ei32_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg4ei32_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t 
vl) { @@ -670,7 +670,7 @@ void test_vsoxseg4ei32_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg4ei32_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg4ei32_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg4ei32_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg4ei32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg4ei32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg4ei32_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg4ei32_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6)
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxseg4ei32_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei64.c
index 5a5944f4cd6a3..e91d73aac16c3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei64.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf4x4
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxseg4ei64_v_f16mf4x4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
@@ -20,7 +20,7 @@ void test_vsoxseg4ei64_v_f16mf4x4(_Float16 *base, vuint64m1_t bindex, vfloat16mf
 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf2x4
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxseg4ei64_v_f16mf2x4(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
@@ -30,7 +30,7 @@ void test_vsoxseg4ei64_v_f16mf2x4(_Float16 *base, vuint64m2_t bindex, vfloat16mf
 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m1x4
 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxseg4ei64_v_f16m1x4(_Float16 *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) {
@@ -40,7 +40,7 @@ void test_vsoxseg4ei64_v_f16m1x4(_Float16 *base, vuint64m4_t bindex, vfloat16m1x
 // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg4ei64_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16m2x4(_Float16 *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg4ei64_v_f16m2x4(_Float16 *base, vuint64m8_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32mf2x4(float *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg4ei64_v_f32mf2x4(float *base, vuint64m1_t bindex, vfloat32mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32m1x4(float *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg4ei64_v_f32m1x4(float *base, vuint64m2_t bindex, vfloat32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32m2x4(float *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg4ei64_v_f32m2x4(float *base, vuint64m4_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", 
, 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f64m1x4(double *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg4ei64_v_f64m1x4(double *base, vuint64m1_t bindex, vfloat64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f64m2x4(double *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg4ei64_v_f64m2x4(double *base, vuint64m2_t bindex, vfloat64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf8x4(int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg4ei64_v_i8mf8x4(int8_t *base, vuint64m1_t bindex, vint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf4x4(int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg4ei64_v_i8mf4x4(int8_t *base, vuint64m2_t bindex, vint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf2x4(int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg4ei64_v_i8mf2x4(int8_t *base, vuint64m4_t bindex, vint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8m1x4(int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg4ei64_v_i8m1x4(int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16mf4x4(int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg4ei64_v_i16mf4x4(int16_t *base, vuint64m1_t bindex, vint16mf4x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16mf2x4(int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg4ei64_v_i16mf2x4(int16_t *base, vuint64m2_t bindex, vint16mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16m1x4(int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg4ei64_v_i16m1x4(int16_t *base, vuint64m4_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16m2x4(int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg4ei64_v_i16m2x4(int16_t *base, vuint64m8_t bindex, vint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32mf2x4(int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg4ei64_v_i32mf2x4(int32_t *base, vuint64m1_t bindex, vint32mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32m1x4(int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg4ei64_v_i32m1x4(int32_t *base, vuint64m2_t bindex, vint32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32m2x4(int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg4ei64_v_i32m2x4(int32_t *base, vuint64m4_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i64m1x4(int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg4ei64_v_i64m1x4(int64_t *base, vuint64m1_t bindex, vint64m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i64m2x4(int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg4ei64_v_i64m2x4(int64_t *base, vuint64m2_t bindex, vint64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf8x4(uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg4ei64_v_u8mf8x4(uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf4x4(uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg4ei64_v_u8mf4x4(uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf2x4(uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg4ei64_v_u8mf2x4(uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8m1x4(uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg4ei64_v_u8m1x4(uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16mf4x4(uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg4ei64_v_u16mf4x4(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16mf2x4(uint16_t 
*base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg4ei64_v_u16mf2x4(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16m1x4(uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg4ei64_v_u16m1x4(uint16_t *base, vuint64m4_t bindex, vuint16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16m2x4(uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg4ei64_v_u16m2x4(uint16_t *base, vuint64m8_t bindex, vuint16m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32mf2x4(uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg4ei64_v_u32mf2x4(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32m1x4(uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void 
test_vsoxseg4ei64_v_u32m1x4(uint32_t *base, vuint64m2_t bindex, vuint32m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32m2x4(uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg4ei64_v_u32m2x4(uint32_t *base, vuint64m4_t bindex, vuint32m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u64m1x4(uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg4ei64_v_u64m1x4(uint64_t *base, vuint64m1_t bindex, vuint64m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u64m2x4(uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg4ei64_v_u64m2x4(uint64_t *base, vuint64m2_t bindex, vuint64m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void 
test_vsoxseg4ei64_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg4ei64_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg4ei64_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg4ei64_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32mf2x4_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg4ei64_v_f32mf2x4_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32m1x4_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg4ei64_v_f32m1x4_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32m2x4_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg4ei64_v_f32m2x4_m(vbool16_t mask, float *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f64m1x4_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg4ei64_v_f64m1x4_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f64m2x4_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg4ei64_v_f64m2x4_m(vbool32_t mask, double *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg4ei64_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg4ei64_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg4ei64_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg4ei64_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg4ei64_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg4ei64_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg4ei64_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg4ei64_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg4ei64_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg4ei64_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg4ei64_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32m2x4_m(vbool16_t mask, int32_t *base, 
vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg4ei64_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg4ei64_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg4ei64_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg4ei64_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg4ei64_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg4ei64_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg4ei64_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg4ei64_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg4ei64_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg4ei64_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg4ei64_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg4ei64_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m1x4_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg4ei64_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg4ei64_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg4ei64_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, 
vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei8.c index 02f8ec6f693f5..d96f911957328 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16mf4x4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg4ei8_v_f16mf4x4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16mf2x4(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg4ei8_v_f16mf2x4(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16m1x4(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg4ei8_v_f16m1x4(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16m2x4(_Float16 *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg4ei8_v_f16m2x4(_Float16 *base, vuint8m1_t bindex, vfloat16m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32mf2x4(float *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg4ei8_v_f32mf2x4(float *base, vuint8mf8_t bindex, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32m1x4(float *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg4ei8_v_f32m1x4(float *base, vuint8mf4_t bindex, vfloat32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32m2x4(float *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg4ei8_v_f32m2x4(float *base, vuint8mf2_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f64m1x4(double *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg4ei8_v_f64m1x4(double *base, vuint8mf8_t bindex, vfloat64m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f64m2x4(double *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg4ei8_v_f64m2x4(double *base, vuint8mf4_t bindex, vfloat64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf8x4(int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg4ei8_v_i8mf8x4(int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf4x4(int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg4ei8_v_i8mf4x4(int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf2x4(int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, 
size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg4ei8_v_i8mf2x4(int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8m1x4(int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg4ei8_v_i8m1x4(int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8m2x4(int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg4ei8_v_i8m2x4(int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16mf4x4(int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg4ei8_v_i16mf4x4(int16_t *base, vuint8mf8_t bindex, vint16mf4x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16mf2x4(int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg4ei8_v_i16mf2x4(int16_t *base, vuint8mf4_t bindex, vint16mf2x4_ // CHECK-RV64-LABEL: 
define dso_local void @test_vsoxseg4ei8_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16m1x4(int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg4ei8_v_i16m1x4(int16_t *base, vuint8mf2_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16m2x4(int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg4ei8_v_i16m2x4(int16_t *base, vuint8m1_t bindex, vint16m2x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i32mf2x4(int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg4ei8_v_i32mf2x4(int32_t *base, vuint8mf8_t bindex, vint32mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i32m1x4(int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg4ei8_v_i32m1x4(int32_t *base, vuint8mf4_t bindex, vint32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i32m2x4(int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg4ei8_v_i32m2x4(int32_t *base, vuint8mf2_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i64m1x4(int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg4ei8_v_i64m1x4(int64_t *base, vuint8mf8_t bindex, vint64m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i64m2x4(int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg4ei8_v_i64m2x4(int64_t *base, vuint8mf4_t bindex, vint64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf8x4(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg4ei8_v_u8mf8x4(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf4x4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg4ei8_v_u8mf4x4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf2x4(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg4ei8_v_u8mf2x4(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8m1x4(uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg4ei8_v_u8m1x4(uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8m2x4(uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg4ei8_v_u8m2x4(uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16mf4x4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg4ei8_v_u16mf4x4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16mf2x4(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg4ei8_v_u16mf2x4(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16m1x4(uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg4ei8_v_u16m1x4(uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16m2x4(uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg4ei8_v_u16m2x4(uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32mf2x4(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg4ei8_v_u32mf2x4(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32m1x4(uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg4ei8_v_u32m1x4(uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32m2x4(uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg4ei8_v_u32m2x4(uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u64m1x4(uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg4ei8_v_u64m1x4(uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u64m2x4(uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg4ei8_v_u64m2x4(uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg4ei8_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg4ei8_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg4ei8_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 
0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg4ei8_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32mf2x4_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg4ei8_v_f32mf2x4_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32m1x4_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg4ei8_v_f32m1x4_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32m2x4_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg4ei8_v_f32m2x4_m(vbool16_t mask, float *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f64m1x4_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg4ei8_v_f64m1x4_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f64m2x4_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg4ei8_v_f64m2x4_m(vbool32_t mask, double *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg4ei8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg4ei8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf2x4_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg4ei8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg4ei8_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg4ei8_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ 
-530,7 +530,7 @@ void test_vsoxseg4ei8_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg4ei8_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg4ei8_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg4ei8_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg4ei8_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg4ei8_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg4ei8_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg4ei8_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], 
i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg4ei8_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg4ei8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg4ei8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg4ei8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg4ei8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg4ei8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg4ei8_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg4ei8_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m1x4_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg4ei8_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg4ei8_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg4ei8_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, 
size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg4ei8_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg4ei8_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg4ei8_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei16.c index 94153306d2018..de5db2869f017 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f16mf4x5(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg5ei16_v_f16mf4x5(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f16mf2x5(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg5ei16_v_f16mf2x5(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f16m1x5(_Float16 *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg5ei16_v_f16m1x5(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f32mf2x5(float *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg5ei16_v_f32mf2x5(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f32m1x5(float *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg5ei16_v_f32m1x5(float *base, vuint16mf2_t bindex, vfloat32m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f64m1x5(double *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg5ei16_v_f64m1x5(double *base, vuint16mf4_t bindex, vfloat64m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf8x5(int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg5ei16_v_i8mf8x5(int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf4x5(int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg5ei16_v_i8mf4x5(int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf2x5(int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg5ei16_v_i8mf2x5(int8_t *base, vuint16m1_t bindex, vint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8m1x5(int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg5ei16_v_i8m1x5(int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16mf4x5(int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg5ei16_v_i16mf4x5(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16mf2x5(int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg5ei16_v_i16mf2x5(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16m1x5(int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg5ei16_v_i16m1x5(int16_t *base, vuint16m1_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i32mf2x5(int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg5ei16_v_i32mf2x5(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i32m1x5(int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg5ei16_v_i32m1x5(int32_t *base, vuint16mf2_t bindex, vint32m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i64m1x5(int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg5ei16_v_i64m1x5(int64_t *base, vuint16mf4_t bindex, vint64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf8x5(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg5ei16_v_u8mf8x5(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf4x5(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg5ei16_v_u8mf4x5(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf2x5(uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg5ei16_v_u8mf2x5(uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8m1x5(uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg5ei16_v_u8m1x5(uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u16mf4x5(uint16_t 
*base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg5ei16_v_u16mf4x5(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u16mf2x5(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg5ei16_v_u16mf2x5(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u16m1x5(uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg5ei16_v_u16m1x5(uint16_t *base, vuint16m1_t bindex, vuint16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u32mf2x5(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg5ei16_v_u32mf2x5(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u32m1x5(uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void 
test_vsoxseg5ei16_v_u32m1x5(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u64m1x5(uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg5ei16_v_u64m1x5(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg5ei16_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg5ei16_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg5ei16_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg5ei16_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f32mf2x5_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg5ei16_v_f32mf2x5_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f32m1x5_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg5ei16_v_f32m1x5_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f64m1x5_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg5ei16_v_f64m1x5_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg5ei16_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg5ei16_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg5ei16_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg5ei16_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg5ei16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg5ei16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg5ei16_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg5ei16_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32m1x5_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg5ei16_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg5ei16_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg5ei16_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) 
{ @@ -450,7 +450,7 @@ void test_vsoxseg5ei16_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg5ei16_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg5ei16_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg5ei16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg5ei16_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg5ei16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg5ei16_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg5ei16_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei32.c index a72fd4149e9f6..73bb9febe6e63 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16mf4x5(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg5ei32_v_f16mf4x5(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16mf2x5(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg5ei32_v_f16mf2x5(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16m1x5(_Float16 *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg5ei32_v_f16m1x5(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32mf2x5 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f32mf2x5(float *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg5ei32_v_f32mf2x5(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f32m1x5(float *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg5ei32_v_f32m1x5(float *base, vuint32m1_t bindex, vfloat32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f64m1x5(double *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg5ei32_v_f64m1x5(double *base, vuint32mf2_t bindex, vfloat64m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf8x5(int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg5ei32_v_i8mf8x5(int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf4x5(int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg5ei32_v_i8mf4x5(int8_t *base, vuint32m1_t bindex, vint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf2x5(int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg5ei32_v_i8mf2x5(int8_t *base, vuint32m2_t bindex, vint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8m1x5(int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg5ei32_v_i8m1x5(int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16mf4x5(int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg5ei32_v_i16mf4x5(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16mf2x5(int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg5ei32_v_i16mf2x5(int16_t *base, vuint32m1_t bindex, vint16mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16m1x5(int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg5ei32_v_i16m1x5(int16_t *base, vuint32m2_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i32mf2x5(int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg5ei32_v_i32mf2x5(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i32m1x5(int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg5ei32_v_i32m1x5(int32_t *base, vuint32m1_t bindex, vint32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i64m1x5(int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg5ei32_v_i64m1x5(int64_t *base, vuint32mf2_t bindex, vint64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf8x5(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg5ei32_v_u8mf8x5(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf4x5(uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg5ei32_v_u8mf4x5(uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf2x5(uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg5ei32_v_u8mf2x5(uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8m1x5(uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg5ei32_v_u8m1x5(uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16mf4x5(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg5ei32_v_u16mf4x5(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16mf2x5(uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg5ei32_v_u16mf2x5(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16m1x5(uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg5ei32_v_u16m1x5(uint16_t *base, vuint32m2_t bindex, vuint16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u32mf2x5(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg5ei32_v_u32mf2x5(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u32m1x5(uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg5ei32_v_u32m1x5(uint32_t *base, vuint32m1_t bindex, vuint32m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u64m1x5(uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg5ei32_v_u64m1x5(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg5ei32_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg5ei32_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg5ei32_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f32mf2x5_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg5ei32_v_f32mf2x5_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f32m1x5_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg5ei32_v_f32m1x5_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f64m1x5_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg5ei32_v_f64m1x5_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg5ei32_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg5ei32_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg5ei32_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8m1x5_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg5ei32_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg5ei32_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg5ei32_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) { 
@@ -400,7 +400,7 @@ void test_vsoxseg5ei32_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg5ei32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg5ei32_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg5ei32_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg5ei32_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg5ei32_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg5ei32_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg5ei32_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg5ei32_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg5ei32_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg5ei32_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg5ei32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg5ei32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei64.c index 25d32d66dd76e..1f7fe50ea6bb3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f16mf4x5(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg5ei64_v_f16mf4x5(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // 
void test_vsoxseg5ei64_v_f16mf2x5(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg5ei64_v_f16mf2x5(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f16m1x5(_Float16 *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg5ei64_v_f16m1x5(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f32mf2x5(float *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg5ei64_v_f32mf2x5(float *base, vuint64m1_t bindex, vfloat32mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f32m1x5(float *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg5ei64_v_f32m1x5(float *base, vuint64m2_t bindex, vfloat32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f64m1x5(double *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 
@@ void test_vsoxseg5ei64_v_f64m1x5(double *base, vuint64m1_t bindex, vfloat64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf8x5(int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg5ei64_v_i8mf8x5(int8_t *base, vuint64m1_t bindex, vint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf4x5(int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg5ei64_v_i8mf4x5(int8_t *base, vuint64m2_t bindex, vint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf2x5(int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg5ei64_v_i8mf2x5(int8_t *base, vuint64m4_t bindex, vint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8m1x5(int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg5ei64_v_i8m1x5(int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg5ei64_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i16mf4x5(int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg5ei64_v_i16mf4x5(int16_t *base, vuint64m1_t bindex, vint16mf4x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i16mf2x5(int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg5ei64_v_i16mf2x5(int16_t *base, vuint64m2_t bindex, vint16mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i16m1x5(int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg5ei64_v_i16m1x5(int16_t *base, vuint64m4_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i32mf2x5(int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg5ei64_v_i32mf2x5(int32_t *base, vuint64m1_t bindex, vint32mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i32m1x5(int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg5ei64_v_i32m1x5(int32_t *base, vuint64m2_t bindex, vint32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i64m1x5(int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg5ei64_v_i64m1x5(int64_t *base, vuint64m1_t bindex, vint64m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf8x5(uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg5ei64_v_u8mf8x5(uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf4x5(uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg5ei64_v_u8mf4x5(uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf2x5(uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg5ei64_v_u8mf2x5(uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8m1x5(uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg5ei64_v_u8m1x5(uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16mf4x5(uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg5ei64_v_u16mf4x5(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16mf2x5(uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg5ei64_v_u16mf2x5(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16m1x5(uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg5ei64_v_u16m1x5(uint16_t *base, vuint64m4_t bindex, vuint16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u32mf2x5(uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg5ei64_v_u32mf2x5(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u32m1x5(uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg5ei64_v_u32m1x5(uint32_t *base, vuint64m2_t bindex, vuint32m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u64m1x5(uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg5ei64_v_u64m1x5(uint64_t *base, vuint64m1_t bindex, vuint64m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg5ei64_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg5ei64_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg5ei64_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f32mf2x5_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg5ei64_v_f32mf2x5_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32m1x5_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f32m1x5_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg5ei64_v_f32m1x5_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f64m1x5_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg5ei64_v_f64m1x5_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg5ei64_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x5_t 
v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg5ei64_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg5ei64_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg5ei64_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg5ei64_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg5ei64_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg5ei64_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg5ei64_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg5ei64_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg5ei64_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg5ei64_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg5ei64_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg5ei64_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg5ei64_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg5ei64_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg5ei64_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg5ei64_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg5ei64_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg5ei64_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei8.c index 53ff659495e37..b53ac68c799e2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f16mf4x5(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg5ei8_v_f16mf4x5(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f16mf2x5(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg5ei8_v_f16mf2x5(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f16m1x5(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg5ei8_v_f16m1x5(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f32mf2x5(float *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg5ei8_v_f32mf2x5(float *base, vuint8mf8_t bindex, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f32m1x5(float *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg5ei8_v_f32m1x5(float *base, vuint8mf4_t bindex, vfloat32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f64m1x5(double *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg5ei8_v_f64m1x5(double *base, vuint8mf8_t bindex, vfloat64m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8mf8x5(int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg5ei8_v_i8mf8x5(int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8mf4x5(int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg5ei8_v_i8mf4x5(int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8mf2x5(int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg5ei8_v_i8mf2x5(int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8m1x5(int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg5ei8_v_i8m1x5(int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16mf4x5(int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg5ei8_v_i16mf4x5(int16_t *base, vuint8mf8_t bindex, vint16mf4x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16mf2x5(int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg5ei8_v_i16mf2x5(int16_t *base, vuint8mf4_t bindex, vint16mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16m1x5(int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg5ei8_v_i16m1x5(int16_t *base, vuint8mf2_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i32mf2x5(int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg5ei8_v_i32mf2x5(int32_t *base, vuint8mf8_t bindex, vint32mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i32m1x5(int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg5ei8_v_i32m1x5(int32_t *base, vuint8mf4_t bindex, vint32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i64m1x5(int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg5ei8_v_i64m1x5(int64_t *base, vuint8mf8_t bindex, vint64m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf8x5(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, 
size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg5ei8_v_u8mf8x5(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf4x5(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg5ei8_v_u8mf4x5(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf2x5(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg5ei8_v_u8mf2x5(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8m1x5(uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg5ei8_v_u8m1x5(uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u16mf4x5(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg5ei8_v_u16mf4x5(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: 
define dso_local void @test_vsoxseg5ei8_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u16mf2x5(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg5ei8_v_u16mf2x5(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u16m1x5(uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg5ei8_v_u16m1x5(uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u32mf2x5(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg5ei8_v_u32mf2x5(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u32m1x5(uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg5ei8_v_u32m1x5(uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u64m1x5(uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg5ei8_v_u64m1x5(uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg5ei8_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg5ei8_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg5ei8_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f32mf2x5_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg5ei8_v_f32mf2x5_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f32m1x5_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg5ei8_v_f32m1x5_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f64m1x5_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg5ei8_v_f64m1x5_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg5ei8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg5ei8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg5ei8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg5ei8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg5ei8_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg5ei8_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg5ei8_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg5ei8_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg5ei8_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg5ei8_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg5ei8_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg5ei8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg5ei8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg5ei8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg5ei8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg5ei8_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void 
test_vsoxseg5ei8_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg5ei8_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg5ei8_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg5ei8_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei16.c index cf05ffeb8d972..ad18d970ec5f5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16mf4x6(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg6ei16_v_f16mf4x6(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16mf2x6(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg6ei16_v_f16mf2x6(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16m1x6(_Float16 *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg6ei16_v_f16m1x6(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f32mf2x6(float *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg6ei16_v_f32mf2x6(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f32m1x6(float *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg6ei16_v_f32m1x6(float *base, vuint16mf2_t bindex, vfloat32m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f64m1x6(double *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg6ei16_v_f64m1x6(double *base, vuint16mf4_t bindex, vfloat64m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf8x6(int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg6ei16_v_i8mf8x6(int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf4x6(int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg6ei16_v_i8mf4x6(int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf2x6(int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg6ei16_v_i8mf2x6(int8_t *base, vuint16m1_t bindex, vint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8m1x6(int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg6ei16_v_i8m1x6(int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i16mf4x6(int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg6ei16_v_i16mf4x6(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i16mf2x6(int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg6ei16_v_i16mf2x6(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i16m1x6(int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg6ei16_v_i16m1x6(int16_t *base, vuint16m1_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i32mf2x6(int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg6ei16_v_i32mf2x6(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i32m1x6(int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg6ei16_v_i32m1x6(int32_t *base, vuint16mf2_t bindex, vint32m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i64m1x6(int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg6ei16_v_i64m1x6(int64_t *base, vuint16mf4_t bindex, vint64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf8x6(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg6ei16_v_u8mf8x6(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf4x6(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg6ei16_v_u8mf4x6(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf2x6(uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg6ei16_v_u8mf2x6(uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8m1x6(uint8_t 
*base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg6ei16_v_u8m1x6(uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16mf4x6(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg6ei16_v_u16mf4x6(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16mf2x6(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg6ei16_v_u16mf2x6(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16m1x6(uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg6ei16_v_u16m1x6(uint16_t *base, vuint16m1_t bindex, vuint16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u32mf2x6(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void 
test_vsoxseg6ei16_v_u32mf2x6(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u32m1x6(uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg6ei16_v_u32m1x6(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u64m1x6(uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg6ei16_v_u64m1x6(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg6ei16_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, 
vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg6ei16_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg6ei16_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f32mf2x6_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg6ei16_v_f32mf2x6_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f32m1x6_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg6ei16_v_f32m1x6_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f64m1x6_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg6ei16_v_f64m1x6_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg6ei16_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg6ei16_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg6ei16_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg6ei16_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg6ei16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg6ei16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg6ei16_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32mf2x6_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg6ei16_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg6ei16_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg6ei16_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) 
{ @@ -440,7 +440,7 @@ void test_vsoxseg6ei16_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg6ei16_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg6ei16_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg6ei16_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg6ei16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg6ei16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg6ei16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg6ei16_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg6ei16_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei32.c index 7813c218f21e7..755cb31ac4d73 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16mf4x6(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg6ei32_v_f16mf4x6(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16mf2x6(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg6ei32_v_f16mf2x6(_Float16 *base, vuint32m1_t bindex, 
vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16m1x6(_Float16 *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg6ei32_v_f16m1x6(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f32mf2x6(float *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg6ei32_v_f32mf2x6(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f32m1x6(float *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg6ei32_v_f32m1x6(float *base, vuint32m1_t bindex, vfloat32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f64m1x6(double *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg6ei32_v_f64m1x6(double *base, vuint32mf2_t bindex, vfloat64m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf8x6(int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg6ei32_v_i8mf8x6(int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf4x6(int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg6ei32_v_i8mf4x6(int8_t *base, vuint32m1_t bindex, vint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf2x6(int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg6ei32_v_i8mf2x6(int8_t *base, vuint32m2_t bindex, vint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8m1x6(int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg6ei32_v_i8m1x6(int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i16mf4x6(int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg6ei32_v_i16mf4x6(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i16mf2x6(int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg6ei32_v_i16mf2x6(int16_t *base, vuint32m1_t bindex, vint16mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i16m1x6(int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg6ei32_v_i16m1x6(int16_t *base, vuint32m2_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i32mf2x6(int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg6ei32_v_i32mf2x6(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i32m1x6(int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg6ei32_v_i32m1x6(int32_t *base, vuint32m1_t bindex, vint32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i64m1x6(int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg6ei32_v_i64m1x6(int64_t *base, vuint32mf2_t bindex, vint64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf8x6(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg6ei32_v_u8mf8x6(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf4x6(uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg6ei32_v_u8mf4x6(uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf2x6(uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg6ei32_v_u8mf2x6(uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8m1x6(uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg6ei32_v_u8m1x6(uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16mf4x6(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg6ei32_v_u16mf4x6(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16mf2x6(uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg6ei32_v_u16mf2x6(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16m1x6(uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg6ei32_v_u16m1x6(uint16_t *base, vuint32m2_t bindex, vuint16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u32mf2x6(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg6ei32_v_u32mf2x6(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u32m1x6(uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg6ei32_v_u32m1x6(uint32_t *base, vuint32m1_t bindex, vuint32m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u64m1x6(uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg6ei32_v_u64m1x6(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg6ei32_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg6ei32_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg6ei32_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f32mf2x6_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg6ei32_v_f32mf2x6_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f32m1x6_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg6ei32_v_f32m1x6_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f64m1x6_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg6ei32_v_f64m1x6_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg6ei32_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg6ei32_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf2x6_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg6ei32_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg6ei32_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg6ei32_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ 
-390,7 +390,7 @@ void test_vsoxseg6ei32_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg6ei32_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg6ei32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg6ei32_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg6ei32_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg6ei32_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg6ei32_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg6ei32_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg6ei32_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg6ei32_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg6ei32_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg6ei32_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg6ei32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg6ei32_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei64.c index 1d6d5816f304f..4df41ab873358 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16mf4x6(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg6ei64_v_f16mf4x6(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16mf2x6(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg6ei64_v_f16mf2x6(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16m1x6(_Float16 *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg6ei64_v_f16m1x6(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f32mf2x6(float *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg6ei64_v_f32mf2x6(float *base, vuint64m1_t bindex, vfloat32mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f32m1x6(float 
*base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg6ei64_v_f32m1x6(float *base, vuint64m2_t bindex, vfloat32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f64m1x6(double *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg6ei64_v_f64m1x6(double *base, vuint64m1_t bindex, vfloat64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf8x6(int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg6ei64_v_i8mf8x6(int8_t *base, vuint64m1_t bindex, vint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf4x6(int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg6ei64_v_i8mf4x6(int8_t *base, vuint64m2_t bindex, vint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf2x6(int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg6ei64_v_i8mf2x6(int8_t *base, 
vuint64m4_t bindex, vint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8m1x6(int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg6ei64_v_i8m1x6(int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16mf4x6(int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg6ei64_v_i16mf4x6(int16_t *base, vuint64m1_t bindex, vint16mf4x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16mf2x6(int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg6ei64_v_i16mf2x6(int16_t *base, vuint64m2_t bindex, vint16mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16m1x6(int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg6ei64_v_i16m1x6(int16_t *base, vuint64m4_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32mf2x6 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i32mf2x6(int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg6ei64_v_i32mf2x6(int32_t *base, vuint64m1_t bindex, vint32mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i32m1x6(int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg6ei64_v_i32m1x6(int32_t *base, vuint64m2_t bindex, vint32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i64m1x6(int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg6ei64_v_i64m1x6(int64_t *base, vuint64m1_t bindex, vint64m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8mf8x6(uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg6ei64_v_u8mf8x6(uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8mf4x6(uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg6ei64_v_u8mf4x6(uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8mf2x6(uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg6ei64_v_u8mf2x6(uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8m1x6(uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg6ei64_v_u8m1x6(uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16mf4x6(uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg6ei64_v_u16mf4x6(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16mf2x6(uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg6ei64_v_u16mf2x6(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16m1x6(uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg6ei64_v_u16m1x6(uint16_t *base, vuint64m4_t bindex, vuint16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u32mf2x6(uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg6ei64_v_u32mf2x6(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u32m1x6(uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg6ei64_v_u32m1x6(uint32_t *base, vuint64m2_t bindex, vuint32m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u64m1x6(uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg6ei64_v_u64m1x6(uint64_t *base, vuint64m1_t bindex, vuint64m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg6ei64_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg6ei64_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg6ei64_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f32mf2x6_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg6ei64_v_f32mf2x6_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f32m1x6_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg6ei64_v_f32m1x6_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f64m1x6_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg6ei64_v_f64m1x6_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg6ei64_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg6ei64_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg6ei64_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg6ei64_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg6ei64_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16mf4x6_m(vbool64_t mask, int16_t *base, 
vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg6ei64_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg6ei64_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg6ei64_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg6ei64_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg6ei64_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg6ei64_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg6ei64_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg6ei64_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg6ei64_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg6ei64_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg6ei64_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg6ei64_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16m1x6_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg6ei64_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg6ei64_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg6ei64_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t 
vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei8.c index 05ca77020c1da..3f197a28951b0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f16mf4x6(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg6ei8_v_f16mf4x6(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f16mf2x6(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg6ei8_v_f16mf2x6(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f16m1x6(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg6ei8_v_f16m1x6(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f32mf2x6(float *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg6ei8_v_f32mf2x6(float *base, vuint8mf8_t bindex, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f32m1x6(float *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg6ei8_v_f32m1x6(float *base, vuint8mf4_t bindex, vfloat32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f64m1x6(double *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg6ei8_v_f64m1x6(double *base, vuint8mf8_t bindex, vfloat64m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf8x6(int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg6ei8_v_i8mf8x6(int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf4x6(int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg6ei8_v_i8mf4x6(int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf2x6(int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg6ei8_v_i8mf2x6(int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8m1x6(int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg6ei8_v_i8m1x6(int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i16mf4x6(int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg6ei8_v_i16mf4x6(int16_t *base, vuint8mf8_t bindex, vint16mf4x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i16mf2x6(int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, 
size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg6ei8_v_i16mf2x6(int16_t *base, vuint8mf4_t bindex, vint16mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i16m1x6(int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg6ei8_v_i16m1x6(int16_t *base, vuint8mf2_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i32mf2x6(int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg6ei8_v_i32mf2x6(int32_t *base, vuint8mf8_t bindex, vint32mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i32m1x6(int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg6ei8_v_i32m1x6(int32_t *base, vuint8mf4_t bindex, vint32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i64m1x6(int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg6ei8_v_i64m1x6(int64_t *base, vuint8mf8_t bindex, vint64m1x6_t // CHECK-RV64-LABEL: 
define dso_local void @test_vsoxseg6ei8_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf8x6(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg6ei8_v_u8mf8x6(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf4x6(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg6ei8_v_u8mf4x6(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf2x6(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg6ei8_v_u8mf2x6(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8m1x6(uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg6ei8_v_u8m1x6(uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u16mf4x6(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg6ei8_v_u16mf4x6(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u16mf2x6(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg6ei8_v_u16mf2x6(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u16m1x6(uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg6ei8_v_u16m1x6(uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u32mf2x6(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg6ei8_v_u32mf2x6(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u32m1x6(uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg6ei8_v_u32m1x6(uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u64m1x6(uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg6ei8_v_u64m1x6(uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg6ei8_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg6ei8_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg6ei8_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f32mf2x6_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg6ei8_v_f32mf2x6_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f32m1x6_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg6ei8_v_f32m1x6_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f64m1x6_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg6ei8_v_f64m1x6_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg6ei8_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg6ei8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg6ei8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg6ei8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint8m1_t 
bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg6ei8_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg6ei8_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg6ei8_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg6ei8_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg6ei8_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg6ei8_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg6ei8_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg6ei8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg6ei8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg6ei8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg6ei8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg6ei8_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg6ei8_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg6ei8_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg6ei8_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ 
void test_vsoxseg6ei8_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei16.c index 4d151f951ff8f..aa14469fa2ad9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16mf4x7(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg7ei16_v_f16mf4x7(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16mf2x7(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg7ei16_v_f16mf2x7(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16m1x7(_Float16 *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg7ei16_v_f16m1x7(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f32mf2x7(float *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg7ei16_v_f32mf2x7(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f32m1x7(float *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg7ei16_v_f32m1x7(float *base, vuint16mf2_t bindex, vfloat32m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f64m1x7(double *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg7ei16_v_f64m1x7(double *base, vuint16mf4_t bindex, vfloat64m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 
7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf8x7(int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg7ei16_v_i8mf8x7(int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf4x7(int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg7ei16_v_i8mf4x7(int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf2x7(int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg7ei16_v_i8mf2x7(int8_t *base, vuint16m1_t bindex, vint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8m1x7(int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg7ei16_v_i8m1x7(int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg7ei16_v_i16mf4x7(int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg7ei16_v_i16mf4x7(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i16mf2x7(int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg7ei16_v_i16mf2x7(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i16m1x7(int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg7ei16_v_i16m1x7(int16_t *base, vuint16m1_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i32mf2x7(int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg7ei16_v_i32mf2x7(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i32m1x7(int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 
+160,7 @@ void test_vsoxseg7ei16_v_i32m1x7(int32_t *base, vuint16mf2_t bindex, vint32m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i64m1x7(int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg7ei16_v_i64m1x7(int64_t *base, vuint16mf4_t bindex, vint64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf8x7(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg7ei16_v_u8mf8x7(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf4x7(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg7ei16_v_u8mf4x7(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf2x7(uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg7ei16_v_u8mf2x7(uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_ // CHECK-RV64-LABEL: 
define dso_local void @test_vsoxseg7ei16_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8m1x7(uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg7ei16_v_u8m1x7(uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16mf4x7(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg7ei16_v_u16mf4x7(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16mf2x7(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg7ei16_v_u16mf2x7(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16m1x7(uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg7ei16_v_u16m1x7(uint16_t *base, vuint16m1_t bindex, vuint16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u32mf2x7(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg7ei16_v_u32mf2x7(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u32m1x7(uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg7ei16_v_u32m1x7(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u64m1x7(uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg7ei16_v_u64m1x7(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg7ei16_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg7ei16_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg7ei16_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f32mf2x7_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg7ei16_v_f32mf2x7_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f32m1x7_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void 
test_vsoxseg7ei16_v_f32m1x7_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f64m1x7_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg7ei16_v_f64m1x7_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg7ei16_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg7ei16_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg7ei16_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg7ei16_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg7ei16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg7ei16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], 
i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg7ei16_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg7ei16_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg7ei16_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg7ei16_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg7ei16_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg7ei16_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg7ei16_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg7ei16_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg7ei16_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg7ei16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg7ei16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg7ei16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u32mf2x7_m(vbool64_t mask, 
uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg7ei16_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg7ei16_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei32.c index 84cbda116810f..47fdb5193d70b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16mf4x7(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg7ei32_v_f16mf4x7(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16mf2x7(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg7ei32_v_f16mf2x7(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16m1x7(_Float16 *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg7ei32_v_f16m1x7(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f32mf2x7(float *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg7ei32_v_f32mf2x7(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f32m1x7(float *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg7ei32_v_f32m1x7(float *base, vuint32m1_t bindex, vfloat32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", 
, 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f64m1x7(double *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg7ei32_v_f64m1x7(double *base, vuint32mf2_t bindex, vfloat64m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf8x7(int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg7ei32_v_i8mf8x7(int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf4x7(int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg7ei32_v_i8mf4x7(int8_t *base, vuint32m1_t bindex, vint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf2x7(int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg7ei32_v_i8mf2x7(int8_t *base, vuint32m2_t bindex, vint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8m1x7(int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg7ei32_v_i8m1x7(int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16mf4x7(int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg7ei32_v_i16mf4x7(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16mf2x7(int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg7ei32_v_i16mf2x7(int16_t *base, vuint32m1_t bindex, vint16mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16m1x7(int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg7ei32_v_i16m1x7(int16_t *base, vuint32m2_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i32mf2x7(int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg7ei32_v_i32mf2x7(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i32m1x7(int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg7ei32_v_i32m1x7(int32_t *base, vuint32m1_t bindex, vint32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i64m1x7(int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg7ei32_v_i64m1x7(int64_t *base, vuint32mf2_t bindex, vint64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf8x7(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg7ei32_v_u8mf8x7(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf4x7(uint8_t 
*base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg7ei32_v_u8mf4x7(uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf2x7(uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg7ei32_v_u8mf2x7(uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8m1x7(uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg7ei32_v_u8m1x7(uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16mf4x7(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg7ei32_v_u16mf4x7(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16mf2x7(uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void 
test_vsoxseg7ei32_v_u16mf2x7(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16m1x7(uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg7ei32_v_u16m1x7(uint16_t *base, vuint32m2_t bindex, vuint16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u32mf2x7(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg7ei32_v_u32mf2x7(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u32m1x7(uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg7ei32_v_u32m1x7(uint32_t *base, vuint32m1_t bindex, vuint32m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u64m1x7(uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg7ei32_v_u64m1x7(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg7ei32_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg7ei32_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg7ei32_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg7ei32_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg7ei32_v_f32mf2x7_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg7ei32_v_f32mf2x7_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f32m1x7_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg7ei32_v_f32m1x7_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f64m1x7_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg7ei32_v_f64m1x7_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg7ei32_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg7ei32_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg7ei32_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg7ei32_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg7ei32_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg7ei32_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg7ei32_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg7ei32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg7ei32_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i64m1x7_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg7ei32_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg7ei32_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg7ei32_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) 
{ @@ -460,7 +460,7 @@ void test_vsoxseg7ei32_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg7ei32_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg7ei32_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg7ei32_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg7ei32_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg7ei32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg7ei32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei64.c index 086a0c3938933..21f7f8e198852 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg7ei64_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16mf4x7(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg7ei64_v_f16mf4x7(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16mf2x7(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg7ei64_v_f16mf2x7(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16m1x7(_Float16 *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg7ei64_v_f16m1x7(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f32mf2x7(float *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg7ei64_v_f32mf2x7(float *base, vuint64m1_t bindex, vfloat32mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f32m1x7(float *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg7ei64_v_f32m1x7(float *base, vuint64m2_t bindex, vfloat32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f64m1x7(double *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg7ei64_v_f64m1x7(double *base, vuint64m1_t bindex, vfloat64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8mf8x7(int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg7ei64_v_i8mf8x7(int8_t *base, vuint64m1_t bindex, vint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8mf4x7(int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg7ei64_v_i8mf4x7(int8_t *base, vuint64m2_t bindex, vint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8mf2x7(int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg7ei64_v_i8mf2x7(int8_t *base, vuint64m4_t bindex, vint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8m1x7(int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg7ei64_v_i8m1x7(int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16mf4x7(int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg7ei64_v_i16mf4x7(int16_t *base, vuint64m1_t bindex, vint16mf4x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16mf2x7(int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg7ei64_v_i16mf2x7(int16_t *base, vuint64m2_t bindex, vint16mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 
7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16m1x7(int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg7ei64_v_i16m1x7(int16_t *base, vuint64m4_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i32mf2x7(int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg7ei64_v_i32mf2x7(int32_t *base, vuint64m1_t bindex, vint32mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i32m1x7(int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg7ei64_v_i32m1x7(int32_t *base, vuint64m2_t bindex, vint32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i64m1x7(int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg7ei64_v_i64m1x7(int64_t *base, vuint64m1_t bindex, vint64m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8mf8x7(uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg7ei64_v_u8mf8x7(uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8mf4x7(uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg7ei64_v_u8mf4x7(uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8mf2x7(uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg7ei64_v_u8mf2x7(uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8m1x7(uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg7ei64_v_u8m1x7(uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u16mf4x7(uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg7ei64_v_u16mf4x7(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u16mf2x7(uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg7ei64_v_u16mf2x7(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u16m1x7(uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg7ei64_v_u16m1x7(uint16_t *base, vuint64m4_t bindex, vuint16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u32mf2x7(uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg7ei64_v_u32mf2x7(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg7ei64_v_u32m1x7(uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg7ei64_v_u32m1x7(uint32_t *base, vuint64m2_t bindex, vuint32m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u64m1x7(uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg7ei64_v_u64m1x7(uint64_t *base, vuint64m1_t bindex, vuint64m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg7ei64_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg7ei64_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", 
, 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg7ei64_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f32mf2x7_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg7ei64_v_f32mf2x7_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f32m1x7_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg7ei64_v_f32m1x7_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f64m1x7_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg7ei64_v_f64m1x7_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg7ei64_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg7ei64_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg7ei64_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg7ei64_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg7ei64_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg7ei64_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg7ei64_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg7ei64_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint64m1_t b // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg7ei64_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg7ei64_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg7ei64_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg7ei64_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg7ei64_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg7ei64_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg7ei64_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg7ei64_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg7ei64_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg7ei64_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg7ei64_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg7ei64_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei8.c index 35678e7594332..f5871641e5cf6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f16mf4x7(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg7ei8_v_f16mf4x7(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f16mf2x7(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg7ei8_v_f16mf2x7(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f16m1x7(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg7ei8_v_f16m1x7(_Float16 *base, 
vuint8mf2_t bindex, vfloat16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f32mf2x7(float *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg7ei8_v_f32mf2x7(float *base, vuint8mf8_t bindex, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f32m1x7(float *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg7ei8_v_f32m1x7(float *base, vuint8mf4_t bindex, vfloat32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f64m1x7(double *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg7ei8_v_f64m1x7(double *base, vuint8mf8_t bindex, vfloat64m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8mf8x7(int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg7ei8_v_i8mf8x7(int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8mf4x7(int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg7ei8_v_i8mf4x7(int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8mf2x7(int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg7ei8_v_i8mf2x7(int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8m1x7(int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg7ei8_v_i8m1x7(int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i16mf4x7(int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg7ei8_v_i16mf4x7(int16_t *base, vuint8mf8_t bindex, vint16mf4x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i16mf2x7(int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg7ei8_v_i16mf2x7(int16_t *base, vuint8mf4_t bindex, vint16mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i16m1x7(int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg7ei8_v_i16m1x7(int16_t *base, vuint8mf2_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i32mf2x7(int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg7ei8_v_i32mf2x7(int32_t *base, vuint8mf8_t bindex, vint32mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i32m1x7(int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg7ei8_v_i32m1x7(int32_t *base, vuint8mf4_t bindex, vint32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i64m1x7(int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg7ei8_v_i64m1x7(int64_t *base, vuint8mf8_t bindex, vint64m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf8x7(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg7ei8_v_u8mf8x7(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf4x7(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg7ei8_v_u8mf4x7(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf2x7(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg7ei8_v_u8mf2x7(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8m1x7(uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg7ei8_v_u8m1x7(uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16mf4x7(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg7ei8_v_u16mf4x7(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16mf2x7(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg7ei8_v_u16mf2x7(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16m1x7(uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg7ei8_v_u16m1x7(uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u32mf2x7(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg7ei8_v_u32mf2x7(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u32m1x7(uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg7ei8_v_u32m1x7(uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u64m1x7(uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg7ei8_v_u64m1x7(uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg7ei8_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg7ei8_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg7ei8_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f32mf2x7_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg7ei8_v_f32mf2x7_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f32m1x7_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg7ei8_v_f32m1x7_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f64m1x7_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg7ei8_v_f64m1x7_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg7ei8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg7ei8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg7ei8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg7ei8_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg7ei8_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg7ei8_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg7ei8_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg7ei8_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg7ei8_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg7ei8_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg7ei8_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint8mf8_t 
bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg7ei8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg7ei8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg7ei8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg7ei8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg7ei8_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg7ei8_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg7ei8_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg7ei8_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg7ei8_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei16.c index 6551b4b86d96e..656ee8e92071a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16mf4x8(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg8ei16_v_f16mf4x8(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16mf2x8(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg8ei16_v_f16mf2x8(_Float16 *base, vuint16mf2_t bindex, vfloat16m // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16m1x8(_Float16 *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg8ei16_v_f16m1x8(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f32mf2x8(float *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg8ei16_v_f32mf2x8(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f32m1x8(float *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg8ei16_v_f32m1x8(float *base, vuint16mf2_t bindex, vfloat32m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f64m1x8(double *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg8ei16_v_f64m1x8(double *base, vuint16mf4_t bindex, vfloat64m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf8x8(int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg8ei16_v_i8mf8x8(int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf4x8(int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg8ei16_v_i8mf4x8(int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf2x8(int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg8ei16_v_i8mf2x8(int8_t *base, vuint16m1_t bindex, vint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8m1x8(int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg8ei16_v_i8m1x8(int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16mf4x8(int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg8ei16_v_i16mf4x8(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16mf2x8(int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg8ei16_v_i16mf2x8(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16m1x8(int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg8ei16_v_i16m1x8(int16_t *base, vuint16m1_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i32mf2x8(int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg8ei16_v_i32mf2x8(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i32m1x8(int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg8ei16_v_i32m1x8(int32_t *base, vuint16mf2_t bindex, vint32m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i64m1x8(int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg8ei16_v_i64m1x8(int64_t *base, vuint16mf4_t bindex, vint64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf8x8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg8ei16_v_u8mf8x8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf4x8(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg8ei16_v_u8mf4x8(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf2x8(uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg8ei16_v_u8mf2x8(uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8m1x8(uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg8ei16_v_u8m1x8(uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16mf4x8(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg8ei16_v_u16mf4x8(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16mf2x8(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg8ei16_v_u16mf2x8(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16m1x8(uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg8ei16_v_u16m1x8(uint16_t *base, vuint16m1_t bindex, vuint16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u32mf2x8(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg8ei16_v_u32mf2x8(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u32m1x8(uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg8ei16_v_u32m1x8(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u64m1x8(uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg8ei16_v_u64m1x8(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg8ei16_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg8ei16_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg8ei16_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f32mf2x8_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg8ei16_v_f32mf2x8_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f32m1x8_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg8ei16_v_f32m1x8_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f64m1x8_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg8ei16_v_f64m1x8_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg8ei16_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg8ei16_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf2x8_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg8ei16_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg8ei16_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg8ei16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ 
-390,7 +390,7 @@ void test_vsoxseg8ei16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg8ei16_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg8ei16_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg8ei16_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg8ei16_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg8ei16_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg8ei16_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg8ei16_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg8ei16_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg8ei16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg8ei16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg8ei16_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg8ei16_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg8ei16_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei32.c index 13550fbcc0278..52adfb62ceea1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f16mf4x8(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg8ei32_v_f16mf4x8(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f16mf2x8(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg8ei32_v_f16mf2x8(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f16m1x8(_Float16 *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg8ei32_v_f16m1x8(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f32mf2x8(float *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg8ei32_v_f32mf2x8(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f32m1x8(float 
*base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg8ei32_v_f32m1x8(float *base, vuint32m1_t bindex, vfloat32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f64m1x8(double *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg8ei32_v_f64m1x8(double *base, vuint32mf2_t bindex, vfloat64m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8mf8x8(int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg8ei32_v_i8mf8x8(int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8mf4x8(int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg8ei32_v_i8mf4x8(int8_t *base, vuint32m1_t bindex, vint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8mf2x8(int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg8ei32_v_i8mf2x8(int8_t *base, 
vuint32m2_t bindex, vint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8m1x8(int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg8ei32_v_i8m1x8(int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16mf4x8(int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg8ei32_v_i16mf4x8(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16mf2x8(int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg8ei32_v_i16mf2x8(int16_t *base, vuint32m1_t bindex, vint16mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16m1x8(int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg8ei32_v_i16m1x8(int16_t *base, vuint32m2_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i32mf2x8 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i32mf2x8(int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg8ei32_v_i32mf2x8(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i32m1x8(int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg8ei32_v_i32m1x8(int32_t *base, vuint32m1_t bindex, vint32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i64m1x8(int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg8ei32_v_i64m1x8(int64_t *base, vuint32mf2_t bindex, vint64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf8x8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg8ei32_v_u8mf8x8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf4x8(uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg8ei32_v_u8mf4x8(uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf2x8(uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg8ei32_v_u8mf2x8(uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8m1x8(uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg8ei32_v_u8m1x8(uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16mf4x8(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg8ei32_v_u16mf4x8(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16mf2x8(uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg8ei32_v_u16mf2x8(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16m1x8(uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg8ei32_v_u16m1x8(uint16_t *base, vuint32m2_t bindex, vuint16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u32mf2x8(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg8ei32_v_u32mf2x8(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u32m1x8(uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg8ei32_v_u32m1x8(uint32_t *base, vuint32m1_t bindex, vuint32m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u64m1x8(uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg8ei32_v_u64m1x8(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg8ei32_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg8ei32_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg8ei32_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f32mf2x8_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg8ei32_v_f32mf2x8_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f32m1x8_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg8ei32_v_f32m1x8_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f64m1x8_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg8ei32_v_f64m1x8_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg8ei32_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg8ei32_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg8ei32_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg8ei32_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg8ei32_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16mf4x8_m(vbool64_t mask, int16_t *base, 
vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg8ei32_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg8ei32_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg8ei32_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg8ei32_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg8ei32_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg8ei32_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg8ei32_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg8ei32_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg8ei32_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg8ei32_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg8ei32_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg8ei32_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16m1x8_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg8ei32_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg8ei32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg8ei32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, 
size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei64.c index bf8884f748142..6d0a9f4c7e5b3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16mf4x8(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg8ei64_v_f16mf4x8(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16mf2x8(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg8ei64_v_f16mf2x8(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16m1x8(_Float16 *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg8ei64_v_f16m1x8(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f32mf2x8(float *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg8ei64_v_f32mf2x8(float *base, vuint64m1_t bindex, vfloat32mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f32m1x8(float *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg8ei64_v_f32m1x8(float *base, vuint64m2_t bindex, vfloat32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f64m1x8(double *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg8ei64_v_f64m1x8(double *base, vuint64m1_t bindex, vfloat64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf8x8(int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg8ei64_v_i8mf8x8(int8_t *base, vuint64m1_t bindex, vint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf4x8(int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg8ei64_v_i8mf4x8(int8_t *base, vuint64m2_t bindex, vint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf2x8(int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg8ei64_v_i8mf2x8(int8_t *base, vuint64m4_t bindex, vint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8m1x8(int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg8ei64_v_i8m1x8(int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16mf4x8(int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg8ei64_v_i16mf4x8(int16_t *base, vuint64m1_t bindex, vint16mf4x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16mf2x8(int16_t *base, vuint64m2_t 
bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg8ei64_v_i16mf2x8(int16_t *base, vuint64m2_t bindex, vint16mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16m1x8(int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg8ei64_v_i16m1x8(int16_t *base, vuint64m4_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i32mf2x8(int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg8ei64_v_i32mf2x8(int32_t *base, vuint64m1_t bindex, vint32mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i32m1x8(int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg8ei64_v_i32m1x8(int32_t *base, vuint64m2_t bindex, vint32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i64m1x8(int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg8ei64_v_i64m1x8(int64_t *base, 
vuint64m1_t bindex, vint64m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf8x8(uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg8ei64_v_u8mf8x8(uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf4x8(uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg8ei64_v_u8mf4x8(uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf2x8(uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg8ei64_v_u8mf2x8(uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8m1x8(uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg8ei64_v_u8m1x8(uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf4x8 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16mf4x8(uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg8ei64_v_u16mf4x8(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16mf2x8(uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg8ei64_v_u16mf2x8(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16m1x8(uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg8ei64_v_u16m1x8(uint16_t *base, vuint64m4_t bindex, vuint16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u32mf2x8(uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg8ei64_v_u32mf2x8(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) 
[[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u32m1x8(uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg8ei64_v_u32m1x8(uint32_t *base, vuint64m2_t bindex, vuint32m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u64m1x8(uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg8ei64_v_u64m1x8(uint64_t *base, vuint64m1_t bindex, vuint64m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg8ei64_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg8ei64_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg8ei64_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f32mf2x8_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg8ei64_v_f32mf2x8_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f32m1x8_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg8ei64_v_f32m1x8_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f64m1x8_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 
+330,7 @@ void test_vsoxseg8ei64_v_f64m1x8_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg8ei64_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg8ei64_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg8ei64_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg8ei64_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg8ei64_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg8ei64_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg8ei64_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg8ei64_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg8ei64_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg8ei64_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg8ei64_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg8ei64_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg8ei64_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg8ei64_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg8ei64_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg8ei64_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg8ei64_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg8ei64_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg8ei64_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u32m1x8_m(vbool32_t mask, uint32_t 
*base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg8ei64_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei8.c index 03959b29783a3..b37b79ce73bde 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f16mf4x8(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg8ei8_v_f16mf4x8(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f16mf2x8(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg8ei8_v_f16mf2x8(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f16m1x8(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg8ei8_v_f16m1x8(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f32mf2x8(float *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg8ei8_v_f32mf2x8(float *base, vuint8mf8_t bindex, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f32m1x8(float *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg8ei8_v_f32m1x8(float *base, vuint8mf4_t bindex, vfloat32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f64m1x8(double *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg8ei8_v_f64m1x8(double *base, vuint8mf8_t bindex, vfloat64m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8mf8x8(int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg8ei8_v_i8mf8x8(int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8mf4x8(int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg8ei8_v_i8mf4x8(int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8mf2x8(int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg8ei8_v_i8mf2x8(int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8m1x8(int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg8ei8_v_i8m1x8(int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16mf4x8(int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg8ei8_v_i16mf4x8(int16_t *base, vuint8mf8_t bindex, vint16mf4x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16mf2x8(int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg8ei8_v_i16mf2x8(int16_t *base, vuint8mf4_t bindex, vint16mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16m1x8(int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg8ei8_v_i16m1x8(int16_t *base, vuint8mf2_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i32mf2x8(int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg8ei8_v_i32mf2x8(int32_t *base, vuint8mf8_t bindex, vint32mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i32m1x8(int32_t 
*base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg8ei8_v_i32m1x8(int32_t *base, vuint8mf4_t bindex, vint32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i64m1x8(int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg8ei8_v_i64m1x8(int64_t *base, vuint8mf8_t bindex, vint64m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf8x8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg8ei8_v_u8mf8x8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf4x8(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg8ei8_v_u8mf4x8(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf2x8(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg8ei8_v_u8mf2x8(uint8_t *base, 
vuint8mf2_t bindex, vuint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8m1x8(uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg8ei8_v_u8m1x8(uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u16mf4x8(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg8ei8_v_u16mf4x8(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u16mf2x8(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg8ei8_v_u16mf2x8(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u16m1x8(uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg8ei8_v_u16m1x8(uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32mf2x8 // CHECK-RV64-SAME: 
(ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u32mf2x8(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg8ei8_v_u32mf2x8(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u32m1x8(uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg8ei8_v_u32m1x8(uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u64m1x8(uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg8ei8_v_u64m1x8(uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg8ei8_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg8ei8_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg8ei8_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f32mf2x8_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg8ei8_v_f32mf2x8_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f32m1x8_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void 
test_vsoxseg8ei8_v_f32m1x8_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f64m1x8_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg8ei8_v_f64m1x8_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg8ei8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg8ei8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) 
// CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg8ei8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg8ei8_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg8ei8_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg8ei8_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg8ei8_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg8ei8_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg8ei8_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg8ei8_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg8ei8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg8ei8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg8ei8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg8ei8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg8ei8_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg8ei8_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg8ei8_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ 
void test_vsoxseg8ei8_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg8ei8_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei16.c index f0c987426532f..0bf332920c77e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16mf4x2(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg2ei16_v_f16mf4x2(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16mf2x2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg2ei16_v_f16mf2x2(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m1x2(_Float16 *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg2ei16_v_f16m1x2(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m2x2(_Float16 *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg2ei16_v_f16m2x2(_Float16 *base, vuint16m2_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m4x2(_Float16 *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg2ei16_v_f16m4x2(_Float16 *base, vuint16m4_t bindex, vfloat16m4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32mf2x2(float *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg2ei16_v_f32mf2x2(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m1x2(float *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg2ei16_v_f32m1x2(float *base, vuint16mf2_t bindex, vfloat32m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m2x2(float *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg2ei16_v_f32m2x2(float *base, vuint16m1_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m4x2(float *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg2ei16_v_f32m4x2(float *base, vuint16m2_t bindex, vfloat32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m1x2(double *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg2ei16_v_f64m1x2(double *base, vuint16mf4_t bindex, vfloat64m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m2x2(double *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg2ei16_v_f64m2x2(double *base, vuint16mf2_t bindex, vfloat64m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m4x2(double *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg2ei16_v_f64m4x2(double *base, vuint16m1_t bindex, vfloat64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8mf8x2(int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg2ei16_v_i8mf8x2(int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8mf4x2(int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg2ei16_v_i8mf4x2(int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8mf2x2(int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg2ei16_v_i8mf2x2(int8_t *base, vuint16m1_t bindex, vint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m1x2(int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg2ei16_v_i8m1x2(int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m2x2(int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg2ei16_v_i8m2x2(int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m4x2(int8_t *base, 
vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg2ei16_v_i8m4x2(int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16mf4x2(int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg2ei16_v_i16mf4x2(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16mf2x2(int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg2ei16_v_i16mf2x2(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m1x2(int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg2ei16_v_i16m1x2(int16_t *base, vuint16m1_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m2x2(int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void 
test_vsuxseg2ei16_v_i16m2x2(int16_t *base, vuint16m2_t bindex, vint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m4x2(int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg2ei16_v_i16m4x2(int16_t *base, vuint16m4_t bindex, vint16m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32mf2x2(int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg2ei16_v_i32mf2x2(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32m1x2(int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg2ei16_v_i32m1x2(int32_t *base, vuint16mf2_t bindex, vint32m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32m2x2(int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg2ei16_v_i32m2x2(int32_t *base, vuint16m1_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg2ei16_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32m4x2(int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg2ei16_v_i32m4x2(int32_t *base, vuint16m2_t bindex, vint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i64m1x2(int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg2ei16_v_i64m1x2(int64_t *base, vuint16mf4_t bindex, vint64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i64m2x2(int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg2ei16_v_i64m2x2(int64_t *base, vuint16mf2_t bindex, vint64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i64m4x2(int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg2ei16_v_i64m4x2(int64_t *base, vuint16m1_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf8x2(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg2ei16_v_u8mf8x2(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf4x2(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg2ei16_v_u8mf4x2(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf2x2(uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg2ei16_v_u8mf2x2(uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m1x2(uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg2ei16_v_u8m1x2(uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m2x2(uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg2ei16_v_u8m2x2(uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m4x2(uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg2ei16_v_u8m4x2(uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16mf4x2(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg2ei16_v_u16mf4x2(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16mf2x2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg2ei16_v_u16mf2x2(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m1x2(uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg2ei16_v_u16m1x2(uint16_t *base, vuint16m1_t bindex, vuint16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m2x2(uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg2ei16_v_u16m2x2(uint16_t *base, vuint16m2_t bindex, vuint16m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m4x2(uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg2ei16_v_u16m4x2(uint16_t *base, vuint16m4_t bindex, vuint16m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32mf2x2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg2ei16_v_u32mf2x2(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m1x2(uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg2ei16_v_u32m1x2(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m2x2(uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg2ei16_v_u32m2x2(uint32_t *base, vuint16m1_t bindex, vuint32m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m4x2(uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg2ei16_v_u32m4x2(uint32_t *base, vuint16m2_t bindex, vuint32m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u64m1x2(uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg2ei16_v_u64m1x2(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u64m2x2(uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg2ei16_v_u64m2x2(uint64_t *base, vuint16mf2_t bindex, vuint64m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u64m4x2(uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg2ei16_v_u64m4x2(uint64_t *base, vuint16m1_t bindex, vuint64m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg2ei16_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg2ei16_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg2ei16_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg2ei16_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg2ei16_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint16m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32mf2x2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg2ei16_v_f32mf2x2_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) 
[[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m1x2_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg2ei16_v_f32m1x2_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m2x2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg2ei16_v_f32m2x2_m(vbool16_t mask, float *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m4x2_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg2ei16_v_f32m4x2_m(vbool8_t mask, float *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m1x2_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg2ei16_v_f64m1x2_m(vbool64_t mask, double *base, 
vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m2x2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg2ei16_v_f64m2x2_m(vbool32_t mask, double *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m4x2_m(vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg2ei16_v_f64m4x2_m(vbool16_t mask, double *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg2ei16_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg2ei16_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg2ei16_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg2ei16_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg2ei16_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg2ei16_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint16m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg2ei16_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint16m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg2ei16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg2ei16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg2ei16_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg2ei16_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg2ei16_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint16m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg2ei16_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg2ei16_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m2x2_m // CHECK-RV64-SAME: 
( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsuxseg2ei16_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsuxseg2ei16_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsuxseg2ei16_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t 
vl) { @@ -780,7 +780,7 @@ void test_vsuxseg2ei16_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsuxseg2ei16_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsuxseg2ei16_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsuxseg2ei16_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsuxseg2ei16_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -830,7 +830,7 @@ void test_vsuxseg2ei16_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -840,7 +840,7 @@ void test_vsuxseg2ei16_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -850,7 +850,7 @@ void test_vsuxseg2ei16_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint16m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -860,7 +860,7 @@ void test_vsuxseg2ei16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -870,7 +870,7 @@ void test_vsuxseg2ei16_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -880,7 +880,7 @@ void test_vsuxseg2ei16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -890,7 +890,7 @@ void test_vsuxseg2ei16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -900,7 +900,7 @@ void test_vsuxseg2ei16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint16m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -910,7 +910,7 @@ void test_vsuxseg2ei16_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -920,7 +920,7 @@ void test_vsuxseg2ei16_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -930,7 +930,7 @@ void test_vsuxseg2ei16_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint16m1_t b // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -940,7 +940,7 @@ void test_vsuxseg2ei16_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -950,7 +950,7 @@ void test_vsuxseg2ei16_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -960,7 +960,7 @@ void test_vsuxseg2ei16_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg2ei16_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei32.c index a0ccec5378293..19a340be7b4ba 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16mf4x2(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg2ei32_v_f16mf4x2(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16mf2x2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg2ei32_v_f16mf2x2(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16m1x2(_Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg2ei32_v_f16m1x2(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16m2x2(_Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg2ei32_v_f16m2x2(_Float16 *base, vuint32m4_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16m4x2(_Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg2ei32_v_f16m4x2(_Float16 *base, vuint32m8_t bindex, vfloat16m4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32mf2x2(float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg2ei32_v_f32mf2x2(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m1x2(float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg2ei32_v_f32m1x2(float *base, vuint32m1_t bindex, vfloat32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m2x2(float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg2ei32_v_f32m2x2(float *base, vuint32m2_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m4x2(float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg2ei32_v_f32m4x2(float *base, vuint32m4_t bindex, vfloat32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f64m1x2(double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg2ei32_v_f64m1x2(double *base, vuint32mf2_t bindex, vfloat64m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f64m2x2(double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg2ei32_v_f64m2x2(double *base, vuint32m1_t bindex, vfloat64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f64m4x2(double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg2ei32_v_f64m4x2(double *base, vuint32m2_t bindex, vfloat64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf8x2(int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg2ei32_v_i8mf8x2(int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf4x2(int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg2ei32_v_i8mf4x2(int8_t *base, vuint32m1_t bindex, vint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf2x2(int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg2ei32_v_i8mf2x2(int8_t *base, vuint32m2_t bindex, vint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8m1x2(int8_t *base, 
vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg2ei32_v_i8m1x2(int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8m2x2(int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg2ei32_v_i8m2x2(int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16mf4x2(int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg2ei32_v_i16mf4x2(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16mf2x2(int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg2ei32_v_i16mf2x2(int16_t *base, vuint32m1_t bindex, vint16mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m1x2(int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg2ei32_v_i16m1x2(int16_t 
*base, vuint32m2_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m2x2(int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg2ei32_v_i16m2x2(int16_t *base, vuint32m4_t bindex, vint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m4x2(int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg2ei32_v_i16m4x2(int16_t *base, vuint32m8_t bindex, vint16m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32mf2x2(int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg2ei32_v_i32mf2x2(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32m1x2(int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg2ei32_v_i32m1x2(int32_t *base, vuint32m1_t bindex, vint32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m2x2 
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32m2x2(int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg2ei32_v_i32m2x2(int32_t *base, vuint32m2_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32m4x2(int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg2ei32_v_i32m4x2(int32_t *base, vuint32m4_t bindex, vint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i64m1x2(int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg2ei32_v_i64m1x2(int64_t *base, vuint32mf2_t bindex, vint64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i64m2x2(int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg2ei32_v_i64m2x2(int64_t *base, vuint32m1_t bindex, vint64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i64m4x2(int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg2ei32_v_i64m4x2(int64_t *base, vuint32m2_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf8x2(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg2ei32_v_u8mf8x2(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf4x2(uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg2ei32_v_u8mf4x2(uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf2x2(uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg2ei32_v_u8mf2x2(uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8m1x2(uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg2ei32_v_u8m1x2(uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8m2x2(uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg2ei32_v_u8m2x2(uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16mf4x2(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg2ei32_v_u16mf4x2(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16mf2x2(uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg2ei32_v_u16mf2x2(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m1x2(uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg2ei32_v_u16m1x2(uint16_t *base, vuint32m2_t bindex, vuint16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m2x2(uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg2ei32_v_u16m2x2(uint16_t *base, vuint32m4_t bindex, vuint16m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m4x2(uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg2ei32_v_u16m4x2(uint16_t *base, vuint32m8_t bindex, vuint16m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32mf2x2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg2ei32_v_u32mf2x2(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32m1x2(uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg2ei32_v_u32m1x2(uint32_t *base, vuint32m1_t bindex, vuint32m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32m2x2(uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg2ei32_v_u32m2x2(uint32_t *base, vuint32m2_t bindex, vuint32m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32m4x2(uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg2ei32_v_u32m4x2(uint32_t *base, vuint32m4_t bindex, vuint32m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m1x2(uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg2ei32_v_u64m1x2(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m2x2(uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg2ei32_v_u64m2x2(uint64_t *base, vuint32m1_t bindex, vuint64m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m4x2(uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg2ei32_v_u64m4x2(uint64_t *base, vuint32m2_t bindex, vuint64m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg2ei32_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg2ei32_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg2ei32_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg2ei32_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg2ei32_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint32m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32mf2x2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg2ei32_v_f32mf2x2_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m1x2_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg2ei32_v_f32m1x2_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m2x2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg2ei32_v_f32m2x2_m(vbool16_t mask, float *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m4x2_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg2ei32_v_f32m4x2_m(vbool8_t mask, float *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f64m1x2_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg2ei32_v_f64m1x2_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m2x2_m // CHECK-RV64-SAME: 
( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f64m2x2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg2ei32_v_f64m2x2_m(vbool32_t mask, double *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f64m4x2_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg2ei32_v_f64m4x2_m(vbool16_t mask, double *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg2ei32_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t 
vl) { @@ -610,7 +610,7 @@ void test_vsuxseg2ei32_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg2ei32_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg2ei32_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg2ei32_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint32m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg2ei32_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg2ei32_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg2ei32_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg2ei32_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg2ei32_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint32m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg2ei32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg2ei32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg2ei32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg2ei32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg2ei32_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsuxseg2ei32_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsuxseg2ei32_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint32m2_t bi // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsuxseg2ei32_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsuxseg2ei32_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsuxseg2ei32_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg2ei32_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsuxseg2ei32_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsuxseg2ei32_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsuxseg2ei32_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -830,7 +830,7 @@ void test_vsuxseg2ei32_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -840,7 +840,7 @@ void test_vsuxseg2ei32_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -850,7 +850,7 @@ void test_vsuxseg2ei32_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -860,7 +860,7 @@ void test_vsuxseg2ei32_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint32m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -870,7 +870,7 @@ void test_vsuxseg2ei32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -880,7 +880,7 @@ void test_vsuxseg2ei32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -890,7 +890,7 @@ void test_vsuxseg2ei32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -900,7 +900,7 @@ void test_vsuxseg2ei32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -910,7 +910,7 @@ void test_vsuxseg2ei32_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m2x2_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -920,7 +920,7 @@ void test_vsuxseg2ei32_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei64.c index 16de15c3f4a53..7eaf7ad6b5112 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16mf4x2(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg2ei64_v_f16mf4x2(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16mf2x2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg2ei64_v_f16mf2x2(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16m1x2(_Float16 *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg2ei64_v_f16m1x2(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16m2x2(_Float16 *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg2ei64_v_f16m2x2(_Float16 *base, vuint64m8_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32mf2x2(float *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg2ei64_v_f32mf2x2(float *base, vuint64m1_t bindex, vfloat32mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32m1x2(float *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg2ei64_v_f32m1x2(float *base, vuint64m2_t bindex, vfloat32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32m2x2(float *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg2ei64_v_f32m2x2(float *base, vuint64m4_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32m4x2(float *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg2ei64_v_f32m4x2(float *base, vuint64m8_t bindex, vfloat32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m1x2(double *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg2ei64_v_f64m1x2(double *base, vuint64m1_t bindex, vfloat64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m2x2(double *base, 
vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg2ei64_v_f64m2x2(double *base, vuint64m2_t bindex, vfloat64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m4x2(double *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg2ei64_v_f64m4x2(double *base, vuint64m4_t bindex, vfloat64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf8x2(int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg2ei64_v_i8mf8x2(int8_t *base, vuint64m1_t bindex, vint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf4x2(int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg2ei64_v_i8mf4x2(int8_t *base, vuint64m2_t bindex, vint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf2x2(int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg2ei64_v_i8mf2x2(int8_t *base, 
vuint64m4_t bindex, vint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8m1x2(int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg2ei64_v_i8m1x2(int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16mf4x2(int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg2ei64_v_i16mf4x2(int16_t *base, vuint64m1_t bindex, vint16mf4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16mf2x2(int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg2ei64_v_i16mf2x2(int16_t *base, vuint64m2_t bindex, vint16mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16m1x2(int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg2ei64_v_i16m1x2(int16_t *base, vuint64m4_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m2x2 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16m2x2(int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg2ei64_v_i16m2x2(int16_t *base, vuint64m8_t bindex, vint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32mf2x2(int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg2ei64_v_i32mf2x2(int32_t *base, vuint64m1_t bindex, vint32mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m1x2(int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg2ei64_v_i32m1x2(int32_t *base, vuint64m2_t bindex, vint32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m2x2(int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg2ei64_v_i32m2x2(int32_t *base, vuint64m4_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m4x2(int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg2ei64_v_i32m4x2(int32_t *base, vuint64m8_t bindex, vint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i64m1x2(int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg2ei64_v_i64m1x2(int64_t *base, vuint64m1_t bindex, vint64m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i64m2x2(int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg2ei64_v_i64m2x2(int64_t *base, vuint64m2_t bindex, vint64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i64m4x2(int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg2ei64_v_i64m4x2(int64_t *base, vuint64m4_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf8x2(uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg2ei64_v_u8mf8x2(uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf4x2(uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg2ei64_v_u8mf4x2(uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf2x2(uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg2ei64_v_u8mf2x2(uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8m1x2(uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg2ei64_v_u8m1x2(uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16mf4x2(uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg2ei64_v_u16mf4x2(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16mf2x2(uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg2ei64_v_u16mf2x2(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16m1x2(uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg2ei64_v_u16m1x2(uint16_t *base, vuint64m4_t bindex, vuint16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16m2x2(uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg2ei64_v_u16m2x2(uint16_t *base, vuint64m8_t bindex, vuint16m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32mf2x2(uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg2ei64_v_u32mf2x2(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m1x2(uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg2ei64_v_u32m1x2(uint32_t *base, vuint64m2_t bindex, vuint32m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m2x2(uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg2ei64_v_u32m2x2(uint32_t *base, vuint64m4_t bindex, vuint32m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m4x2(uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg2ei64_v_u32m4x2(uint32_t *base, vuint64m8_t bindex, vuint32m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u64m1x2(uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg2ei64_v_u64m1x2(uint64_t *base, vuint64m1_t bindex, vuint64m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u64m2x2(uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg2ei64_v_u64m2x2(uint64_t *base, vuint64m2_t bindex, vuint64m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u64m4x2(uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg2ei64_v_u64m4x2(uint64_t *base, vuint64m4_t bindex, vuint64m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg2ei64_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg2ei64_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg2ei64_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg2ei64_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32mf2x2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg2ei64_v_f32mf2x2_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32m1x2_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg2ei64_v_f32m1x2_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32m2x2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg2ei64_v_f32m2x2_m(vbool16_t mask, float *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32m4x2_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg2ei64_v_f32m4x2_m(vbool8_t mask, float *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m1x2_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg2ei64_v_f64m1x2_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m2x2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m2x2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg2ei64_v_f64m2x2_m(vbool32_t mask, double *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m4x2_m(vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg2ei64_v_f64m4x2_m(vbool16_t mask, double *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg2ei64_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { 
@@ -550,7 +550,7 @@ void test_vsuxseg2ei64_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg2ei64_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg2ei64_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg2ei64_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg2ei64_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg2ei64_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg2ei64_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg2ei64_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg2ei64_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg2ei64_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg2ei64_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg2ei64_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg2ei64_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg2ei64_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg2ei64_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg2ei64_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local 
void @test_vsuxseg2ei64_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg2ei64_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg2ei64_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg2ei64_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16mf2x2_m(vbool32_t mask, uint16_t 
*base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg2ei64_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsuxseg2ei64_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsuxseg2ei64_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsuxseg2ei64_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsuxseg2ei64_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsuxseg2ei64_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsuxseg2ei64_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsuxseg2ei64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsuxseg2ei64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei8.c index 8dc1346edffc8..5aca1f8017ec8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16mf4x2(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg2ei8_v_f16mf4x2(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16mf2x2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x2_t 
v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg2ei8_v_f16mf2x2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m1x2(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg2ei8_v_f16m1x2(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m2x2(_Float16 *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg2ei8_v_f16m2x2(_Float16 *base, vuint8m1_t bindex, vfloat16m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m4x2(_Float16 *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg2ei8_v_f16m4x2(_Float16 *base, vuint8m2_t bindex, vfloat16m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32mf2x2(float *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg2ei8_v_f32mf2x2(float *base, vuint8mf8_t bindex, vfloat32mf2x2_ // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32m1x2(float *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg2ei8_v_f32m1x2(float *base, vuint8mf4_t bindex, vfloat32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32m2x2(float *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg2ei8_v_f32m2x2(float *base, vuint8mf2_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32m4x2(float *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg2ei8_v_f32m4x2(float *base, vuint8m1_t bindex, vfloat32m4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m1x2(double *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg2ei8_v_f64m1x2(double *base, vuint8mf8_t bindex, vfloat64m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m2x2(double *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg2ei8_v_f64m2x2(double *base, vuint8mf4_t bindex, vfloat64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m4x2(double *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg2ei8_v_f64m4x2(double *base, vuint8mf2_t bindex, vfloat64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf8x2(int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg2ei8_v_i8mf8x2(int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf4x2(int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg2ei8_v_i8mf4x2(int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf2x2(int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg2ei8_v_i8mf2x2(int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m1x2(int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg2ei8_v_i8m1x2(int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m2x2(int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg2ei8_v_i8m2x2(int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m4x2(int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg2ei8_v_i8m4x2(int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16mf4x2(int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg2ei8_v_i16mf4x2(int16_t *base, vuint8mf8_t bindex, vint16mf4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16mf2x2(int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg2ei8_v_i16mf2x2(int16_t *base, vuint8mf4_t bindex, vint16mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m1x2(int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg2ei8_v_i16m1x2(int16_t *base, vuint8mf2_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m2x2(int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg2ei8_v_i16m2x2(int16_t *base, vuint8m1_t bindex, vint16m2x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m4x2(int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg2ei8_v_i16m4x2(int16_t *base, vuint8m2_t bindex, vint16m4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32mf2x2(int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg2ei8_v_i32mf2x2(int32_t *base, vuint8mf8_t bindex, vint32mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m1x2(int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg2ei8_v_i32m1x2(int32_t *base, vuint8mf4_t bindex, vint32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m2x2(int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg2ei8_v_i32m2x2(int32_t *base, vuint8mf2_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m4x2(int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg2ei8_v_i32m4x2(int32_t *base, vuint8m1_t bindex, vint32m4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m1x2(int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg2ei8_v_i64m1x2(int64_t *base, vuint8mf8_t bindex, vint64m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m2x2(int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg2ei8_v_i64m2x2(int64_t *base, vuint8mf4_t bindex, vint64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m4x2(int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg2ei8_v_i64m4x2(int64_t *base, vuint8mf2_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf8x2(uint8_t *base, vuint8mf8_t bindex, 
vuint8mf8x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg2ei8_v_u8mf8x2(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf4x2(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg2ei8_v_u8mf4x2(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf2x2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg2ei8_v_u8mf2x2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m1x2(uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg2ei8_v_u8m1x2(uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m2x2(uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg2ei8_v_u8m2x2(uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_ 
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m4x2(uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg2ei8_v_u8m4x2(uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16mf4x2(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg2ei8_v_u16mf4x2(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16mf2x2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg2ei8_v_u16mf2x2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16m1x2(uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg2ei8_v_u16m1x2(uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16m2x2(uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg2ei8_v_u16m2x2(uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16m4x2(uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg2ei8_v_u16m4x2(uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32mf2x2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg2ei8_v_u32mf2x2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32m1x2(uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg2ei8_v_u32m1x2(uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32m2x2(uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg2ei8_v_u32m2x2(uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32m4x2(uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg2ei8_v_u32m4x2(uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u64m1x2(uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg2ei8_v_u64m1x2(uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u64m2x2(uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg2ei8_v_u64m2x2(uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u64m4x2(uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg2ei8_v_u64m4x2(uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg2ei8_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg2ei8_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg2ei8_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg2ei8_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg2ei8_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint8m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32mf2x2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg2ei8_v_f32mf2x2_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32m1x2_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void 
test_vsuxseg2ei8_v_f32m1x2_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32m2x2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg2ei8_v_f32m2x2_m(vbool16_t mask, float *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32m4x2_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg2ei8_v_f32m4x2_m(vbool8_t mask, float *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m1x2_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg2ei8_v_f64m1x2_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], 
i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m2x2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg2ei8_v_f64m2x2_m(vbool32_t mask, double *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m4x2_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg2ei8_v_f64m4x2_m(vbool16_t mask, double *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg2ei8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg2ei8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg2ei8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg2ei8_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg2ei8_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg2ei8_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg2ei8_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg2ei8_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg2ei8_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg2ei8_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg2ei8_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint8m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg2ei8_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg2ei8_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void 
test_vsuxseg2ei8_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsuxseg2ei8_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsuxseg2ei8_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsuxseg2ei8_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], 
i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsuxseg2ei8_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsuxseg2ei8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsuxseg2ei8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsuxseg2ei8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -830,7 +830,7 @@ void test_vsuxseg2ei8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -840,7 +840,7 @@ void test_vsuxseg2ei8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -850,7 +850,7 @@ void test_vsuxseg2ei8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -860,7 +860,7 @@ void test_vsuxseg2ei8_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -870,7 +870,7 @@ void test_vsuxseg2ei8_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -880,7 +880,7 @@ void test_vsuxseg2ei8_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -890,7 +890,7 @@ void test_vsuxseg2ei8_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -900,7 +900,7 @@ void test_vsuxseg2ei8_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint8m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32mf2x2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -910,7 +910,7 @@ void test_vsuxseg2ei8_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -920,7 +920,7 @@ void test_vsuxseg2ei8_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -930,7 +930,7 @@ void test_vsuxseg2ei8_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ 
-940,7 +940,7 @@ void test_vsuxseg2ei8_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -950,7 +950,7 @@ void test_vsuxseg2ei8_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -960,7 +960,7 @@ void test_vsuxseg2ei8_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei16.c index f9b76824236b3..03a05e3949cd3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16mf4x3(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg3ei16_v_f16mf4x3(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16mf2x3(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg3ei16_v_f16mf2x3(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16m1x3(_Float16 *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg3ei16_v_f16m1x3(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16m2x3(_Float16 *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg3ei16_v_f16m2x3(_Float16 *base, vuint16m2_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32mf2x3(float *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg3ei16_v_f32mf2x3(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32m1x3(float *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg3ei16_v_f32m1x3(float *base, vuint16mf2_t bindex, vfloat32m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32m2x3(float *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg3ei16_v_f32m2x3(float *base, vuint16m1_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f64m1x3(double *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg3ei16_v_f64m1x3(double *base, vuint16mf4_t bindex, vfloat64m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f64m2x3(double *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg3ei16_v_f64m2x3(double *base, vuint16mf2_t bindex, vfloat64m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf8x3(int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg3ei16_v_i8mf8x3(int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf4x3(int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg3ei16_v_i8mf4x3(int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf2x3(int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg3ei16_v_i8mf2x3(int8_t *base, vuint16m1_t bindex, vint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8m1x3(int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg3ei16_v_i8m1x3(int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8m2x3(int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg3ei16_v_i8m2x3(int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16mf4x3(int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg3ei16_v_i16mf4x3(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16mf2x3(int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg3ei16_v_i16mf2x3(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16m1x3(int16_t 
*base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg3ei16_v_i16m1x3(int16_t *base, vuint16m1_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16m2x3(int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg3ei16_v_i16m2x3(int16_t *base, vuint16m2_t bindex, vint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i32mf2x3(int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg3ei16_v_i32mf2x3(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i32m1x3(int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg3ei16_v_i32m1x3(int32_t *base, vuint16mf2_t bindex, vint32m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i32m2x3(int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void 
test_vsuxseg3ei16_v_i32m2x3(int32_t *base, vuint16m1_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i64m1x3(int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg3ei16_v_i64m1x3(int64_t *base, vuint16mf4_t bindex, vint64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i64m2x3(int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg3ei16_v_i64m2x3(int64_t *base, vuint16mf2_t bindex, vint64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf8x3(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg3ei16_v_u8mf8x3(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf4x3(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg3ei16_v_u8mf4x3(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3 // CHECK-RV64-LABEL: define dso_local 
void @test_vsuxseg3ei16_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf2x3(uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg3ei16_v_u8mf2x3(uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8m1x3(uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg3ei16_v_u8m1x3(uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8m2x3(uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg3ei16_v_u8m2x3(uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16mf4x3(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg3ei16_v_u16mf4x3(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16mf2x3(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg3ei16_v_u16mf2x3(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16m1x3(uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg3ei16_v_u16m1x3(uint16_t *base, vuint16m1_t bindex, vuint16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16m2x3(uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg3ei16_v_u16m2x3(uint16_t *base, vuint16m2_t bindex, vuint16m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u32mf2x3(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg3ei16_v_u32mf2x3(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u32m1x3(uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg3ei16_v_u32m1x3(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u32m2x3(uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg3ei16_v_u32m2x3(uint32_t *base, vuint16m1_t bindex, vuint32m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u64m1x3(uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg3ei16_v_u64m1x3(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u64m2x3(uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg3ei16_v_u64m2x3(uint64_t *base, vuint16mf2_t bindex, vuint64m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg3ei16_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg3ei16_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg3ei16_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg3ei16_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32mf2x3_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32mf2x3_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg3ei16_v_f32mf2x3_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32m1x3_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg3ei16_v_f32m1x3_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32m2x3_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg3ei16_v_f32m2x3_m(vbool16_t mask, float *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f64m1x3_m(vbool64_t mask, double *base, vuint16mf4_t bindex, 
vfloat64m1x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg3ei16_v_f64m1x3_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f64m2x3_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg3ei16_v_f64m2x3_m(vbool32_t mask, double *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg3ei16_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg3ei16_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg3ei16_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg3ei16_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg3ei16_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint16m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg3ei16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg3ei16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg3ei16_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg3ei16_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg3ei16_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m1x3_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg3ei16_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg3ei16_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg3ei16_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t 
vl) { @@ -610,7 +610,7 @@ void test_vsuxseg3ei16_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg3ei16_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg3ei16_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg3ei16_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg3ei16_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg3ei16_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg3ei16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg3ei16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg3ei16_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg3ei16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg3ei16_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg3ei16_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg3ei16_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg3ei16_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei32.c index eb7e7162210e6..3540a9b5ec1c7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16mf4x3(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg3ei32_v_f16mf4x3(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16mf2x3(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg3ei32_v_f16mf2x3(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16m1x3(_Float16 *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg3ei32_v_f16m1x3(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16m2x3(_Float16 *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg3ei32_v_f16m2x3(_Float16 *base, vuint32m4_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg3ei32_v_f32mf2x3(float *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg3ei32_v_f32mf2x3(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f32m1x3(float *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg3ei32_v_f32m1x3(float *base, vuint32m1_t bindex, vfloat32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f32m2x3(float *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg3ei32_v_f32m2x3(float *base, vuint32m2_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f64m1x3(double *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg3ei32_v_f64m1x3(double *base, vuint32mf2_t bindex, vfloat64m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f64m2x3(double *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ 
void test_vsuxseg3ei32_v_f64m2x3(double *base, vuint32m1_t bindex, vfloat64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf8x3(int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg3ei32_v_i8mf8x3(int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf4x3(int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg3ei32_v_i8mf4x3(int8_t *base, vuint32m1_t bindex, vint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf2x3(int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg3ei32_v_i8mf2x3(int8_t *base, vuint32m2_t bindex, vint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8m1x3(int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg3ei32_v_i8m1x3(int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg3ei32_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8m2x3(int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg3ei32_v_i8m2x3(int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16mf4x3(int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg3ei32_v_i16mf4x3(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16mf2x3(int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg3ei32_v_i16mf2x3(int16_t *base, vuint32m1_t bindex, vint16mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16m1x3(int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg3ei32_v_i16m1x3(int16_t *base, vuint32m2_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16m2x3(int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg3ei32_v_i16m2x3(int16_t *base, vuint32m4_t bindex, vint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32mf2x3(int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg3ei32_v_i32mf2x3(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32m1x3(int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg3ei32_v_i32m1x3(int32_t *base, vuint32m1_t bindex, vint32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32m2x3(int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg3ei32_v_i32m2x3(int32_t *base, vuint32m2_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i64m1x3(int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg3ei32_v_i64m1x3(int64_t *base, vuint32mf2_t bindex, vint64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i64m2x3(int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg3ei32_v_i64m2x3(int64_t *base, vuint32m1_t bindex, vint64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf8x3(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg3ei32_v_u8mf8x3(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf4x3(uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg3ei32_v_u8mf4x3(uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf2x3(uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg3ei32_v_u8mf2x3(uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8m1x3(uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg3ei32_v_u8m1x3(uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8m2x3(uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg3ei32_v_u8m2x3(uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16mf4x3(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg3ei32_v_u16mf4x3(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16mf2x3(uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg3ei32_v_u16mf2x3(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16m1x3(uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg3ei32_v_u16m1x3(uint16_t *base, vuint32m2_t bindex, vuint16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16m2x3(uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg3ei32_v_u16m2x3(uint16_t *base, vuint32m4_t bindex, vuint16m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32mf2x3(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg3ei32_v_u32mf2x3(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32m1x3(uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg3ei32_v_u32m1x3(uint32_t *base, vuint32m1_t bindex, vuint32m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32m2x3(uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg3ei32_v_u32m2x3(uint32_t *base, vuint32m2_t bindex, vuint32m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u64m1x3(uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg3ei32_v_u64m1x3(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u64m2x3(uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg3ei32_v_u64m2x3(uint64_t *base, vuint32m1_t bindex, vuint64m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg3ei32_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg3ei32_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg3ei32_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg3ei32_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f32mf2x3_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg3ei32_v_f32mf2x3_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f32m1x3_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg3ei32_v_f32m1x3_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f32m2x3_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg3ei32_v_f32m2x3_m(vbool16_t mask, float *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f64m1x3_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg3ei32_v_f64m1x3_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m2x3_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f64m2x3_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg3ei32_v_f64m2x3_m(vbool32_t mask, double *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg3ei32_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg3ei32_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x3_t 
v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg3ei32_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg3ei32_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg3ei32_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint32m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg3ei32_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg3ei32_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg3ei32_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg3ei32_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg3ei32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg3ei32_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg3ei32_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg3ei32_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg3ei32_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg3ei32_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg3ei32_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg3ei32_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg3ei32_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg3ei32_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg3ei32_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg3ei32_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg3ei32_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg3ei32_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg3ei32_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg3ei32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg3ei32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg3ei32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg3ei32_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei64.c index d2d71226d0783..213350d285668 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16mf4x3(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg3ei64_v_f16mf4x3(_Float16 *base, vuint64m1_t bindex, vfloat16mf // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16mf2x3(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg3ei64_v_f16mf2x3(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16m1x3(_Float16 *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg3ei64_v_f16m1x3(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16m2x3(_Float16 *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg3ei64_v_f16m2x3(_Float16 *base, vuint64m8_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f32mf2x3(float *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg3ei64_v_f32mf2x3(float *base, vuint64m1_t bindex, vfloat32mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f32m1x3(float *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg3ei64_v_f32m1x3(float *base, vuint64m2_t bindex, vfloat32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f32m2x3(float *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg3ei64_v_f32m2x3(float *base, vuint64m4_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f64m1x3(double *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg3ei64_v_f64m1x3(double *base, vuint64m1_t bindex, vfloat64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f64m2x3(double *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg3ei64_v_f64m2x3(double *base, vuint64m2_t bindex, vfloat64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf8x3(int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg3ei64_v_i8mf8x3(int8_t *base, vuint64m1_t bindex, vint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf4x3(int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg3ei64_v_i8mf4x3(int8_t *base, vuint64m2_t bindex, vint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf2x3(int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg3ei64_v_i8mf2x3(int8_t *base, vuint64m4_t bindex, vint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8m1x3(int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg3ei64_v_i8m1x3(int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16mf4x3(int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg3ei64_v_i16mf4x3(int16_t *base, vuint64m1_t bindex, vint16mf4x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16mf2x3(int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg3ei64_v_i16mf2x3(int16_t *base, vuint64m2_t bindex, vint16mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16m1x3(int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg3ei64_v_i16m1x3(int16_t *base, vuint64m4_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16m2x3(int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg3ei64_v_i16m2x3(int16_t *base, vuint64m8_t bindex, vint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32mf2x3(int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg3ei64_v_i32mf2x3(int32_t *base, vuint64m1_t bindex, vint32mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32m1x3(int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg3ei64_v_i32m1x3(int32_t *base, vuint64m2_t bindex, vint32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32m2x3(int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg3ei64_v_i32m2x3(int32_t *base, vuint64m4_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i64m1x3(int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg3ei64_v_i64m1x3(int64_t *base, vuint64m1_t bindex, vint64m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i64m2x3(int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg3ei64_v_i64m2x3(int64_t *base, vuint64m2_t bindex, vint64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf8x3(uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg3ei64_v_u8mf8x3(uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf4x3(uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg3ei64_v_u8mf4x3(uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf2x3(uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg3ei64_v_u8mf2x3(uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8m1x3(uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg3ei64_v_u8m1x3(uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16mf4x3(uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg3ei64_v_u16mf4x3(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16mf2x3(uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg3ei64_v_u16mf2x3(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16m1x3(uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg3ei64_v_u16m1x3(uint16_t *base, vuint64m4_t bindex, vuint16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg3ei64_v_u16m2x3(uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg3ei64_v_u16m2x3(uint16_t *base, vuint64m8_t bindex, vuint16m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32mf2x3(uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg3ei64_v_u32mf2x3(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32m1x3(uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg3ei64_v_u32m1x3(uint32_t *base, vuint64m2_t bindex, vuint32m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32m2x3(uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg3ei64_v_u32m2x3(uint32_t *base, vuint64m4_t bindex, vuint32m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u64m1x3(uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -350,7 
+350,7 @@ void test_vsuxseg3ei64_v_u64m1x3(uint64_t *base, vuint64m1_t bindex, vuint64m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u64m2x3(uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg3ei64_v_u64m2x3(uint64_t *base, vuint64m2_t bindex, vuint64m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg3ei64_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg3ei64_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // 
void test_vsuxseg3ei64_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg3ei64_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg3ei64_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f32mf2x3_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg3ei64_v_f32mf2x3_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f32m1x3_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg3ei64_v_f32m1x3_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f32m2x3_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg3ei64_v_f32m2x3_m(vbool16_t mask, float *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f64m1x3_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg3ei64_v_f64m1x3_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f64m2x3_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg3ei64_v_f64m2x3_m(vbool32_t mask, double *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg3ei64_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg3ei64_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg3ei64_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg3ei64_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg3ei64_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf2x3_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg3ei64_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg3ei64_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg3ei64_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) 
{ @@ -540,7 +540,7 @@ void test_vsuxseg3ei64_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg3ei64_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg3ei64_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg3ei64_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg3ei64_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg3ei64_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg3ei64_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg3ei64_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg3ei64_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg3ei64_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg3ei64_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg3ei64_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg3ei64_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg3ei64_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg3ei64_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg3ei64_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint64m4_t b // CHECK-RV64-LABEL: 
define dso_local void @test_vsuxseg3ei64_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg3ei64_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei8.c index 7f66f3511758b..5ecdd65c29e6d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16mf4x3(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg3ei8_v_f16mf4x3(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16mf2x3(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg3ei8_v_f16mf2x3(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16m1x3(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg3ei8_v_f16m1x3(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16m2x3(_Float16 *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg3ei8_v_f16m2x3(_Float16 *base, vuint8m1_t bindex, vfloat16m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f32mf2x3(float *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg3ei8_v_f32mf2x3(float *base, vuint8mf8_t bindex, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f32m1x3(float *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg3ei8_v_f32m1x3(float *base, vuint8mf4_t bindex, vfloat32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f32m2x3(float *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg3ei8_v_f32m2x3(float *base, vuint8mf2_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f64m1x3(double *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg3ei8_v_f64m1x3(double *base, vuint8mf8_t bindex, vfloat64m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f64m2x3(double *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg3ei8_v_f64m2x3(double *base, vuint8mf4_t bindex, vfloat64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf8x3(int8_t *base, vuint8mf8_t bindex, 
vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg3ei8_v_i8mf8x3(int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf4x3(int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg3ei8_v_i8mf4x3(int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf2x3(int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg3ei8_v_i8mf2x3(int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8m1x3(int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg3ei8_v_i8m1x3(int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8m2x3(int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg3ei8_v_i8m2x3(int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tu // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16mf4x3(int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg3ei8_v_i16mf4x3(int16_t *base, vuint8mf8_t bindex, vint16mf4x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16mf2x3(int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg3ei8_v_i16mf2x3(int16_t *base, vuint8mf4_t bindex, vint16mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16m1x3(int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg3ei8_v_i16m1x3(int16_t *base, vuint8mf2_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16m2x3(int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg3ei8_v_i16m2x3(int16_t *base, vuint8m1_t bindex, vint16m2x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32mf2x3(int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg3ei8_v_i32mf2x3(int32_t *base, vuint8mf8_t bindex, vint32mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32m1x3(int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg3ei8_v_i32m1x3(int32_t *base, vuint8mf4_t bindex, vint32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32m2x3(int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg3ei8_v_i32m2x3(int32_t *base, vuint8mf2_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i64m1x3(int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg3ei8_v_i64m1x3(int64_t *base, vuint8mf8_t bindex, vint64m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i64m2x3(int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg3ei8_v_i64m2x3(int64_t *base, vuint8mf4_t bindex, vint64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8mf8x3(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg3ei8_v_u8mf8x3(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8mf4x3(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg3ei8_v_u8mf4x3(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8mf2x3(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg3ei8_v_u8mf2x3(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8m1x3(uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg3ei8_v_u8m1x3(uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8m2x3(uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg3ei8_v_u8m2x3(uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16mf4x3(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg3ei8_v_u16mf4x3(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16mf2x3(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg3ei8_v_u16mf2x3(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16m1x3(uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg3ei8_v_u16m1x3(uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16m2x3(uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg3ei8_v_u16m2x3(uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32mf2x3(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg3ei8_v_u32mf2x3(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32m1x3(uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg3ei8_v_u32m1x3(uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32m2x3(uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg3ei8_v_u32m2x3(uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u64m1x3(uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg3ei8_v_u64m1x3(uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u64m2x3(uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg3ei8_v_u64m2x3(uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg3ei8_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg3ei8_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg3ei8_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg3ei8_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f32mf2x3_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg3ei8_v_f32mf2x3_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f32m1x3_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg3ei8_v_f32m1x3_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f32m2x3_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg3ei8_v_f32m2x3_m(vbool16_t mask, float *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f64m1x3_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg3ei8_v_f64m1x3_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f64m2x3_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg3ei8_v_f64m2x3_m(vbool32_t mask, double *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg3ei8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg3ei8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg3ei8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg3ei8_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m2x3_m 
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg3ei8_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg3ei8_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg3ei8_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, 
size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg3ei8_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg3ei8_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg3ei8_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg3ei8_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg3ei8_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg3ei8_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg3ei8_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg3ei8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg3ei8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg3ei8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg3ei8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg3ei8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg3ei8_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg3ei8_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg3ei8_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg3ei8_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32mf2x3_m 
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg3ei8_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg3ei8_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg3ei8_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t 
v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg3ei8_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei16.c index 7689c51f46aa0..b9132b9273dd3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16mf4x4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg4ei16_v_f16mf4x4(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16mf2x4(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg4ei16_v_f16mf2x4(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16m1x4(_Float16 *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg4ei16_v_f16m1x4(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16m2x4(_Float16 *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg4ei16_v_f16m2x4(_Float16 *base, vuint16m2_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f32mf2x4(float *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg4ei16_v_f32mf2x4(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f32m1x4(float *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg4ei16_v_f32m1x4(float *base, vuint16mf2_t bindex, vfloat32m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f32m2x4(float *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg4ei16_v_f32m2x4(float *base, vuint16m1_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f64m1x4(double *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg4ei16_v_f64m1x4(double *base, vuint16mf4_t bindex, vfloat64m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f64m2x4(double *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg4ei16_v_f64m2x4(double *base, vuint16mf2_t bindex, vfloat64m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8mf8x4(int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg4ei16_v_i8mf8x4(int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8mf4x4(int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg4ei16_v_i8mf4x4(int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8mf2x4(int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg4ei16_v_i8mf2x4(int8_t *base, vuint16m1_t bindex, vint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8m1x4(int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg4ei16_v_i8m1x4(int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8m2x4(int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg4ei16_v_i8m2x4(int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16mf4x4(int16_t *base, 
vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg4ei16_v_i16mf4x4(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16mf2x4(int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg4ei16_v_i16mf2x4(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16m1x4(int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg4ei16_v_i16m1x4(int16_t *base, vuint16m1_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16m2x4(int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg4ei16_v_i16m2x4(int16_t *base, vuint16m2_t bindex, vint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32mf2x4(int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void 
test_vsuxseg4ei16_v_i32mf2x4(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32m1x4(int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg4ei16_v_i32m1x4(int32_t *base, vuint16mf2_t bindex, vint32m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32m2x4(int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg4ei16_v_i32m2x4(int32_t *base, vuint16m1_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i64m1x4(int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg4ei16_v_i64m1x4(int64_t *base, vuint16mf4_t bindex, vint64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i64m2x4(int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg4ei16_v_i64m2x4(int64_t *base, vuint16mf2_t bindex, vint64m2x4_ // CHECK-RV64-LABEL: define dso_local 
void @test_vsuxseg4ei16_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf8x4(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg4ei16_v_u8mf8x4(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf4x4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg4ei16_v_u8mf4x4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf2x4(uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg4ei16_v_u8mf2x4(uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8m1x4(uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg4ei16_v_u8m1x4(uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8m2x4(uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg4ei16_v_u8m2x4(uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16mf4x4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg4ei16_v_u16mf4x4(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16mf2x4(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg4ei16_v_u16mf2x4(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16m1x4(uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg4ei16_v_u16m1x4(uint16_t *base, vuint16m1_t bindex, vuint16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16m2x4(uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg4ei16_v_u16m2x4(uint16_t *base, vuint16m2_t bindex, vuint16m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u32mf2x4(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg4ei16_v_u32mf2x4(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u32m1x4(uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg4ei16_v_u32m1x4(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u32m2x4(uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg4ei16_v_u32m2x4(uint32_t *base, vuint16m1_t bindex, vuint32m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u64m1x4(uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg4ei16_v_u64m1x4(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u64m2x4(uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg4ei16_v_u64m2x4(uint64_t *base, vuint16mf2_t bindex, vuint64m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg4ei16_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg4ei16_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg4ei16_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg4ei16_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f32mf2x4_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg4ei16_v_f32mf2x4_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f32m1x4_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg4ei16_v_f32m1x4_m(vbool32_t mask, float *base, vuint16mf2_t bin // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f32m2x4_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg4ei16_v_f32m2x4_m(vbool16_t mask, float *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f64m1x4_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg4ei16_v_f64m1x4_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f64m2x4_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg4ei16_v_f64m2x4_m(vbool32_t mask, double *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg4ei16_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg4ei16_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg4ei16_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg4ei16_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg4ei16_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg4ei16_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint16m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg4ei16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg4ei16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg4ei16_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg4ei16_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg4ei16_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg4ei16_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg4ei16_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m1x4_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg4ei16_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg4ei16_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg4ei16_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t 
vl) { @@ -630,7 +630,7 @@ void test_vsuxseg4ei16_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg4ei16_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg4ei16_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg4ei16_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg4ei16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg4ei16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg4ei16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg4ei16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg4ei16_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg4ei16_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg4ei16_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg4ei16_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei32.c index 2bef098811a91..5910e50273f6b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16mf4x4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg4ei32_v_f16mf4x4(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16mf2x4(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg4ei32_v_f16mf2x4(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16m1x4(_Float16 *base, vuint32m2_t 
bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg4ei32_v_f16m1x4(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16m2x4(_Float16 *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg4ei32_v_f16m2x4(_Float16 *base, vuint32m4_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32mf2x4(float *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg4ei32_v_f32mf2x4(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32m1x4(float *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg4ei32_v_f32m1x4(float *base, vuint32m1_t bindex, vfloat32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32m2x4(float *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg4ei32_v_f32m2x4(float *base, 
vuint32m2_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f64m1x4(double *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg4ei32_v_f64m1x4(double *base, vuint32mf2_t bindex, vfloat64m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f64m2x4(double *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg4ei32_v_f64m2x4(double *base, vuint32m1_t bindex, vfloat64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf8x4(int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg4ei32_v_i8mf8x4(int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf4x4(int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg4ei32_v_i8mf4x4(int8_t *base, vuint32m1_t bindex, vint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf2x4 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf2x4(int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg4ei32_v_i8mf2x4(int8_t *base, vuint32m2_t bindex, vint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8m1x4(int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg4ei32_v_i8m1x4(int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8m2x4(int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg4ei32_v_i8m2x4(int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16mf4x4(int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg4ei32_v_i16mf4x4(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16mf2x4(int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg4ei32_v_i16mf2x4(int16_t *base, vuint32m1_t bindex, vint16mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16m1x4(int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg4ei32_v_i16m1x4(int16_t *base, vuint32m2_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16m2x4(int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg4ei32_v_i16m2x4(int16_t *base, vuint32m4_t bindex, vint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32mf2x4(int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg4ei32_v_i32mf2x4(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32m1x4(int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg4ei32_v_i32m1x4(int32_t *base, vuint32m1_t bindex, vint32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32m2x4(int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg4ei32_v_i32m2x4(int32_t *base, vuint32m2_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i64m1x4(int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg4ei32_v_i64m1x4(int64_t *base, vuint32mf2_t bindex, vint64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i64m2x4(int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg4ei32_v_i64m2x4(int64_t *base, vuint32m1_t bindex, vint64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf8x4(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg4ei32_v_u8mf8x4(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf4x4(uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg4ei32_v_u8mf4x4(uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf2x4(uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg4ei32_v_u8mf2x4(uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8m1x4(uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg4ei32_v_u8m1x4(uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8m2x4(uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg4ei32_v_u8m2x4(uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16mf4x4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg4ei32_v_u16mf4x4(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16mf2x4(uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg4ei32_v_u16mf2x4(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16m1x4(uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg4ei32_v_u16m1x4(uint16_t *base, vuint32m2_t bindex, vuint16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16m2x4(uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg4ei32_v_u16m2x4(uint16_t *base, vuint32m4_t bindex, vuint16m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u32mf2x4(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg4ei32_v_u32mf2x4(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u32m1x4(uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg4ei32_v_u32m1x4(uint32_t *base, vuint32m1_t bindex, vuint32m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u32m2x4(uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg4ei32_v_u32m2x4(uint32_t *base, vuint32m2_t bindex, vuint32m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg4ei32_v_u64m1x4(uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg4ei32_v_u64m1x4(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u64m2x4(uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg4ei32_v_u64m2x4(uint64_t *base, vuint32m1_t bindex, vuint64m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg4ei32_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg4ei32_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg4ei32_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg4ei32_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32mf2x4_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg4ei32_v_f32mf2x4_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32m1x4_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg4ei32_v_f32m1x4_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32m2x4_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg4ei32_v_f32m2x4_m(vbool16_t mask, float *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f64m1x4_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg4ei32_v_f64m1x4_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f64m2x4_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg4ei32_v_f64m2x4_m(vbool32_t mask, double *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg4ei32_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf4x4_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg4ei32_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg4ei32_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg4ei32_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -520,7 
+520,7 @@ void test_vsuxseg4ei32_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint32m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg4ei32_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg4ei32_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg4ei32_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg4ei32_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg4ei32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg4ei32_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg4ei32_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg4ei32_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg4ei32_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg4ei32_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg4ei32_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg4ei32_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg4ei32_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg4ei32_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg4ei32_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: 
define dso_local void @test_vsuxseg4ei32_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg4ei32_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg4ei32_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg4ei32_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg4ei32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg4ei32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg4ei32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg4ei32_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg4ei32_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei64.c index 27129b8f0ad29..40dfc6d58bfe1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16mf4x4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg4ei64_v_f16mf4x4(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16mf2x4(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg4ei64_v_f16mf2x4(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16m1x4(_Float16 *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg4ei64_v_f16m1x4(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", 
, 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16m2x4(_Float16 *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg4ei64_v_f16m2x4(_Float16 *base, vuint64m8_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f32mf2x4(float *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg4ei64_v_f32mf2x4(float *base, vuint64m1_t bindex, vfloat32mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f32m1x4(float *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg4ei64_v_f32m1x4(float *base, vuint64m2_t bindex, vfloat32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f32m2x4(float *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg4ei64_v_f32m2x4(float *base, vuint64m4_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f64m1x4(double *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg4ei64_v_f64m1x4(double *base, vuint64m1_t bindex, vfloat64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f64m2x4(double *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg4ei64_v_f64m2x4(double *base, vuint64m2_t bindex, vfloat64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8mf8x4(int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg4ei64_v_i8mf8x4(int8_t *base, vuint64m1_t bindex, vint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8mf4x4(int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg4ei64_v_i8mf4x4(int8_t *base, vuint64m2_t bindex, vint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8mf2x4(int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg4ei64_v_i8mf2x4(int8_t *base, vuint64m4_t bindex, vint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8m1x4(int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg4ei64_v_i8m1x4(int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16mf4x4(int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg4ei64_v_i16mf4x4(int16_t *base, vuint64m1_t bindex, vint16mf4x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16mf2x4(int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg4ei64_v_i16mf2x4(int16_t *base, vuint64m2_t bindex, vint16mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16m1x4(int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg4ei64_v_i16m1x4(int16_t *base, vuint64m4_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16m2x4(int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg4ei64_v_i16m2x4(int16_t *base, vuint64m8_t bindex, vint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i32mf2x4(int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg4ei64_v_i32mf2x4(int32_t *base, vuint64m1_t bindex, vint32mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i32m1x4(int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg4ei64_v_i32m1x4(int32_t *base, vuint64m2_t bindex, vint32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i32m2x4(int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg4ei64_v_i32m2x4(int32_t *base, vuint64m4_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i64m1x4(int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg4ei64_v_i64m1x4(int64_t *base, vuint64m1_t bindex, vint64m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i64m2x4(int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg4ei64_v_i64m2x4(int64_t *base, vuint64m2_t bindex, vint64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8mf8x4(uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg4ei64_v_u8mf8x4(uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8mf4x4(uint8_t 
*base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg4ei64_v_u8mf4x4(uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8mf2x4(uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg4ei64_v_u8mf2x4(uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8m1x4(uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg4ei64_v_u8m1x4(uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16mf4x4(uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg4ei64_v_u16mf4x4(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16mf2x4(uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void 
test_vsuxseg4ei64_v_u16mf2x4(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16m1x4(uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg4ei64_v_u16m1x4(uint16_t *base, vuint64m4_t bindex, vuint16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16m2x4(uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg4ei64_v_u16m2x4(uint16_t *base, vuint64m8_t bindex, vuint16m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32mf2x4(uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg4ei64_v_u32mf2x4(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32m1x4(uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg4ei64_v_u32m1x4(uint32_t *base, vuint64m2_t bindex, vuint32m1x4 // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg4ei64_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32m2x4(uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg4ei64_v_u32m2x4(uint32_t *base, vuint64m4_t bindex, vuint32m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u64m1x4(uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg4ei64_v_u64m1x4(uint64_t *base, vuint64m1_t bindex, vuint64m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u64m2x4(uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg4ei64_v_u64m2x4(uint64_t *base, vuint64m2_t bindex, vuint64m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg4ei64_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg4ei64_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg4ei64_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg4ei64_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg4ei64_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f32mf2x4_m(vbool64_t mask, float 
*base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg4ei64_v_f32mf2x4_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f32m1x4_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg4ei64_v_f32m1x4_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f32m2x4_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg4ei64_v_f32m2x4_m(vbool16_t mask, float *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f64m1x4_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg4ei64_v_f64m1x4_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f64m2x4_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg4ei64_v_f64m2x4_m(vbool32_t mask, double *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg4ei64_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg4ei64_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg4ei64_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg4ei64_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg4ei64_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg4ei64_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg4ei64_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m2x4_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg4ei64_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg4ei64_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg4ei64_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) 
{ @@ -560,7 +560,7 @@ void test_vsuxseg4ei64_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg4ei64_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg4ei64_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg4ei64_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg4ei64_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg4ei64_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg4ei64_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg4ei64_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg4ei64_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg4ei64_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg4ei64_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg4ei64_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg4ei64_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg4ei64_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg4ei64_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei8.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei8.c index c24467f36e72c..fcdd26bff18f8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16mf4x4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg4ei8_v_f16mf4x4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16mf2x4(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg4ei8_v_f16mf2x4(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16m1x4(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg4ei8_v_f16m1x4(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) 
// CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16m2x4(_Float16 *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg4ei8_v_f16m2x4(_Float16 *base, vuint8m1_t bindex, vfloat16m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32mf2x4(float *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg4ei8_v_f32mf2x4(float *base, vuint8mf8_t bindex, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32m1x4(float *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg4ei8_v_f32m1x4(float *base, vuint8mf4_t bindex, vfloat32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32m2x4(float *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg4ei8_v_f32m2x4(float *base, vuint8mf2_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f64m1x4(double *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ 
-90,7 +90,7 @@ void test_vsuxseg4ei8_v_f64m1x4(double *base, vuint8mf8_t bindex, vfloat64m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f64m2x4(double *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg4ei8_v_f64m2x4(double *base, vuint8mf4_t bindex, vfloat64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf8x4(int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg4ei8_v_i8mf8x4(int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf4x4(int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg4ei8_v_i8mf4x4(int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf2x4(int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg4ei8_v_i8mf2x4(int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg4ei8_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8m1x4(int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg4ei8_v_i8m1x4(int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8m2x4(int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg4ei8_v_i8m2x4(int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16mf4x4(int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg4ei8_v_i16mf4x4(int16_t *base, vuint8mf8_t bindex, vint16mf4x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16mf2x4(int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg4ei8_v_i16mf2x4(int16_t *base, vuint8mf4_t bindex, vint16mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) 
[[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16m1x4(int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg4ei8_v_i16m1x4(int16_t *base, vuint8mf2_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16m2x4(int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg4ei8_v_i16m2x4(int16_t *base, vuint8m1_t bindex, vint16m2x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i32mf2x4(int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg4ei8_v_i32mf2x4(int32_t *base, vuint8mf8_t bindex, vint32mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i32m1x4(int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg4ei8_v_i32m1x4(int32_t *base, vuint8mf4_t bindex, vint32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i32m2x4(int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg4ei8_v_i32m2x4(int32_t *base, vuint8mf2_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i64m1x4(int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg4ei8_v_i64m1x4(int64_t *base, vuint8mf8_t bindex, vint64m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i64m2x4(int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg4ei8_v_i64m2x4(int64_t *base, vuint8mf4_t bindex, vint64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8mf8x4(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg4ei8_v_u8mf8x4(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8mf4x4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg4ei8_v_u8mf4x4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8mf2x4(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg4ei8_v_u8mf2x4(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8m1x4(uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg4ei8_v_u8m1x4(uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8m2x4(uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg4ei8_v_u8m2x4(uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16mf4x4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg4ei8_v_u16mf4x4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16mf2x4(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg4ei8_v_u16mf2x4(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16m1x4(uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg4ei8_v_u16m1x4(uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16m2x4(uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg4ei8_v_u16m2x4(uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u32mf2x4(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg4ei8_v_u32mf2x4(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u32m1x4(uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg4ei8_v_u32m1x4(uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u32m2x4(uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg4ei8_v_u32m2x4(uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u64m1x4(uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg4ei8_v_u64m1x4(uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u64m2x4(uint64_t *base, vuint8mf4_t 
bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg4ei8_v_u64m2x4(uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg4ei8_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg4ei8_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg4ei8_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg4ei8_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32mf2x4_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg4ei8_v_f32mf2x4_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32m1x4_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg4ei8_v_f32m1x4_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32m2x4_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg4ei8_v_f32m2x4_m(vbool16_t mask, float *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f64m1x4_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg4ei8_v_f64m1x4_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f64m2x4_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg4ei8_v_f64m2x4_m(vbool32_t mask, double *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg4ei8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg4ei8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg4ei8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg4ei8_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg4ei8_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void 
test_vsuxseg4ei8_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg4ei8_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg4ei8_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg4ei8_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], 
i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg4ei8_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg4ei8_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg4ei8_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg4ei8_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg4ei8_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg4ei8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg4ei8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg4ei8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg4ei8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg4ei8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg4ei8_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg4ei8_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg4ei8_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg4ei8_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg4ei8_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ 
void test_vsuxseg4ei8_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg4ei8_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg4ei8_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei16.c index e082c795ead05..b63fae90a4978 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f16mf4x5(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg5ei16_v_f16mf4x5(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f16mf2x5(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg5ei16_v_f16mf2x5(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f16m1x5(_Float16 *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg5ei16_v_f16m1x5(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f32mf2x5(float *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg5ei16_v_f32mf2x5(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f32m1x5(float *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg5ei16_v_f32m1x5(float *base, vuint16mf2_t bindex, vfloat32m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f64m1x5(double *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg5ei16_v_f64m1x5(double *base, vuint16mf4_t bindex, vfloat64m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf8x5(int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg5ei16_v_i8mf8x5(int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf4x5(int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg5ei16_v_i8mf4x5(int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf2x5(int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg5ei16_v_i8mf2x5(int8_t *base, vuint16m1_t bindex, vint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8m1x5(int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg5ei16_v_i8m1x5(int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16mf4x5(int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg5ei16_v_i16mf4x5(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16mf2x5(int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg5ei16_v_i16mf2x5(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16m1x5(int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg5ei16_v_i16m1x5(int16_t *base, vuint16m1_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i32mf2x5(int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg5ei16_v_i32mf2x5(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i32m1x5(int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg5ei16_v_i32m1x5(int32_t *base, vuint16mf2_t bindex, vint32m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i64m1x5(int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg5ei16_v_i64m1x5(int64_t *base, vuint16mf4_t bindex, vint64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf8x5(uint8_t 
*base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg5ei16_v_u8mf8x5(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf4x5(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg5ei16_v_u8mf4x5(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf2x5(uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg5ei16_v_u8mf2x5(uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8m1x5(uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg5ei16_v_u8m1x5(uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16mf4x5(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void 
test_vsuxseg5ei16_v_u16mf4x5(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16mf2x5(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg5ei16_v_u16mf2x5(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16m1x5(uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg5ei16_v_u16m1x5(uint16_t *base, vuint16m1_t bindex, vuint16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u32mf2x5(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg5ei16_v_u32mf2x5(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u32m1x5(uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg5ei16_v_u32m1x5(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg5ei16_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u64m1x5(uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg5ei16_v_u64m1x5(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg5ei16_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg5ei16_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t 
vl) { @@ -300,7 +300,7 @@ void test_vsuxseg5ei16_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f32mf2x5_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg5ei16_v_f32mf2x5_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f32m1x5_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg5ei16_v_f32m1x5_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f64m1x5_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg5ei16_v_f64m1x5_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg5ei16_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg5ei16_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg5ei16_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg5ei16_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg5ei16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg5ei16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg5ei16_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg5ei16_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg5ei16_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg5ei16_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg5ei16_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg5ei16_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg5ei16_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg5ei16_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg5ei16_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg5ei16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16mf2x5_m(vbool32_t mask, 
uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg5ei16_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg5ei16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg5ei16_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg5ei16_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei32.c index 5b49712bdd26c..18bffadd41ff7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16mf4x5(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg5ei32_v_f16mf4x5(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16mf2x5(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg5ei32_v_f16mf2x5(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16m1x5(_Float16 *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg5ei32_v_f16m1x5(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f32mf2x5(float *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg5ei32_v_f32mf2x5(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f32m1x5(float *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg5ei32_v_f32m1x5(float *base, vuint32m1_t bindex, vfloat32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f64m1x5(double *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg5ei32_v_f64m1x5(double *base, vuint32mf2_t bindex, vfloat64m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf8x5(int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg5ei32_v_i8mf8x5(int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf4x5(int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg5ei32_v_i8mf4x5(int8_t *base, vuint32m1_t bindex, vint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf2x5(int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg5ei32_v_i8mf2x5(int8_t *base, vuint32m2_t bindex, vint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8m1x5(int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg5ei32_v_i8m1x5(int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16mf4x5(int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg5ei32_v_i16mf4x5(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16mf2x5(int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg5ei32_v_i16mf2x5(int16_t *base, vuint32m1_t bindex, vint16mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16m1x5(int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg5ei32_v_i16m1x5(int16_t *base, vuint32m2_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i32mf2x5(int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg5ei32_v_i32mf2x5(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i32m1x5(int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg5ei32_v_i32m1x5(int32_t *base, vuint32m1_t bindex, vint32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i64m1x5(int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg5ei32_v_i64m1x5(int64_t *base, vuint32mf2_t bindex, vint64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8mf8x5(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg5ei32_v_u8mf8x5(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8mf4x5(uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg5ei32_v_u8mf4x5(uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8mf2x5(uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg5ei32_v_u8mf2x5(uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8m1x5(uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg5ei32_v_u8m1x5(uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u16mf4x5(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg5ei32_v_u16mf4x5(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u16mf2x5(uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg5ei32_v_u16mf2x5(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u16m1x5(uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg5ei32_v_u16m1x5(uint16_t *base, vuint32m2_t bindex, vuint16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg5ei32_v_u32mf2x5(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg5ei32_v_u32mf2x5(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u32m1x5(uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg5ei32_v_u32m1x5(uint32_t *base, vuint32m1_t bindex, vuint32m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u64m1x5(uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg5ei32_v_u64m1x5(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg5ei32_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) 
// CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg5ei32_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg5ei32_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f32mf2x5_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg5ei32_v_f32mf2x5_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f32m1x5_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg5ei32_v_f32m1x5_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f64m1x5_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg5ei32_v_f64m1x5_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg5ei32_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg5ei32_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg5ei32_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg5ei32_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg5ei32_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg5ei32_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg5ei32_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32mf2x5_m 
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg5ei32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg5ei32_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg5ei32_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, 
vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg5ei32_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg5ei32_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg5ei32_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg5ei32_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg5ei32_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg5ei32_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg5ei32_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg5ei32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg5ei32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei64.c index b785f2b3e1ce6..0f10905732bc1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16mf4x5(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg5ei64_v_f16mf4x5(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16mf2x5(_Float16 *base, vuint64m2_t bindex, 
vfloat16mf2x5_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg5ei64_v_f16mf2x5(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16m1x5(_Float16 *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg5ei64_v_f16m1x5(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f32mf2x5(float *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg5ei64_v_f32mf2x5(float *base, vuint64m1_t bindex, vfloat32mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f32m1x5(float *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg5ei64_v_f32m1x5(float *base, vuint64m2_t bindex, vfloat32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f64m1x5(double *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg5ei64_v_f64m1x5(double *base, vuint64m1_t bindex, 
vfloat64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8mf8x5(int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg5ei64_v_i8mf8x5(int8_t *base, vuint64m1_t bindex, vint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8mf4x5(int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg5ei64_v_i8mf4x5(int8_t *base, vuint64m2_t bindex, vint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8mf2x5(int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg5ei64_v_i8mf2x5(int8_t *base, vuint64m4_t bindex, vint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8m1x5(int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg5ei64_v_i8m1x5(int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i16mf4x5(int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg5ei64_v_i16mf4x5(int16_t *base, vuint64m1_t bindex, vint16mf4x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i16mf2x5(int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg5ei64_v_i16mf2x5(int16_t *base, vuint64m2_t bindex, vint16mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i16m1x5(int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg5ei64_v_i16m1x5(int16_t *base, vuint64m4_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i32mf2x5(int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg5ei64_v_i32mf2x5(int32_t *base, vuint64m1_t bindex, vint32mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i32m1x5(int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg5ei64_v_i32m1x5(int32_t *base, vuint64m2_t bindex, vint32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i64m1x5(int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg5ei64_v_i64m1x5(int64_t *base, vuint64m1_t bindex, vint64m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf8x5(uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg5ei64_v_u8mf8x5(uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf4x5(uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg5ei64_v_u8mf4x5(uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf2x5(uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg5ei64_v_u8mf2x5(uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8m1x5(uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg5ei64_v_u8m1x5(uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16mf4x5(uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg5ei64_v_u16mf4x5(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16mf2x5(uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg5ei64_v_u16mf2x5(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16m1x5(uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg5ei64_v_u16m1x5(uint16_t *base, vuint64m4_t bindex, vuint16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u32mf2x5(uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg5ei64_v_u32mf2x5(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u32m1x5(uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg5ei64_v_u32m1x5(uint32_t *base, vuint64m2_t bindex, vuint32m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u64m1x5(uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg5ei64_v_u64m1x5(uint64_t *base, vuint64m1_t bindex, vuint64m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg5ei64_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg5ei64_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg5ei64_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f32mf2x5_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg5ei64_v_f32mf2x5_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f32m1x5_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg5ei64_v_f32m1x5_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f64m1x5_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg5ei64_v_f64m1x5_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg5ei64_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg5ei64_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf2x5_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg5ei64_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg5ei64_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg5ei64_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ 
-390,7 +390,7 @@ void test_vsuxseg5ei64_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg5ei64_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg5ei64_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg5ei64_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg5ei64_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg5ei64_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg5ei64_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg5ei64_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg5ei64_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg5ei64_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg5ei64_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg5ei64_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg5ei64_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg5ei64_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei8.c index 3cbe449810cf1..4a7479673ff65 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f16mf4x5(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg5ei8_v_f16mf4x5(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f16mf2x5(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg5ei8_v_f16mf2x5(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f16m1x5(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg5ei8_v_f16m1x5(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f32mf2x5(float *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg5ei8_v_f32mf2x5(float *base, vuint8mf8_t bindex, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f32m1x5(float *base, vuint8mf4_t bindex, vfloat32m1x5_t 
v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg5ei8_v_f32m1x5(float *base, vuint8mf4_t bindex, vfloat32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f64m1x5(double *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg5ei8_v_f64m1x5(double *base, vuint8mf8_t bindex, vfloat64m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf8x5(int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg5ei8_v_i8mf8x5(int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf4x5(int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg5ei8_v_i8mf4x5(int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf2x5(int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg5ei8_v_i8mf2x5(int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v // CHECK-RV64-LABEL: 
define dso_local void @test_vsuxseg5ei8_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8m1x5(int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg5ei8_v_i8m1x5(int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16mf4x5(int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg5ei8_v_i16mf4x5(int16_t *base, vuint8mf8_t bindex, vint16mf4x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16mf2x5(int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg5ei8_v_i16mf2x5(int16_t *base, vuint8mf4_t bindex, vint16mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16m1x5(int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg5ei8_v_i16m1x5(int16_t *base, vuint8mf2_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i32mf2x5(int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg5ei8_v_i32mf2x5(int32_t *base, vuint8mf8_t bindex, vint32mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i32m1x5(int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg5ei8_v_i32m1x5(int32_t *base, vuint8mf4_t bindex, vint32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i64m1x5(int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg5ei8_v_i64m1x5(int64_t *base, vuint8mf8_t bindex, vint64m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf8x5(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg5ei8_v_u8mf8x5(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf4x5(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg5ei8_v_u8mf4x5(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf2x5(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg5ei8_v_u8mf2x5(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8m1x5(uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg5ei8_v_u8m1x5(uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u16mf4x5(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg5ei8_v_u16mf4x5(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u16mf2x5(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg5ei8_v_u16mf2x5(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u16m1x5(uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg5ei8_v_u16m1x5(uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u32mf2x5(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg5ei8_v_u32mf2x5(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u32m1x5(uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg5ei8_v_u32m1x5(uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u64m1x5(uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg5ei8_v_u64m1x5(uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg5ei8_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg5ei8_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg5ei8_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f32mf2x5_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg5ei8_v_f32mf2x5_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f32m1x5_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg5ei8_v_f32m1x5_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f64m1x5_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg5ei8_v_f64m1x5_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg5ei8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg5ei8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg5ei8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg5ei8_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void 
test_vsuxseg5ei8_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg5ei8_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg5ei8_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg5ei8_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], 
i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg5ei8_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg5ei8_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg5ei8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg5ei8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg5ei8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg5ei8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg5ei8_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg5ei8_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) {
@@ -500,7 +500,7 @@ void test_vsuxseg5ei8_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32mf2x5_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) {
@@ -510,7 +510,7 @@ void test_vsuxseg5ei8_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32m1x5_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) {
@@ -520,7 +520,7 @@ void test_vsuxseg5ei8_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u64m1x5_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg5ei8_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei16.c
index 2bfa6203c199b..f0928b55c3a1f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei16.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf4x6
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_f16mf4x6(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) {
@@ -20,7 +20,7 @@ void test_vsuxseg6ei16_v_f16mf4x6(_Float16 *base, vuint16mf4_t bindex, vfloat16m
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf2x6
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_f16mf2x6(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) {
@@ -30,7 +30,7 @@ void test_vsuxseg6ei16_v_f16mf2x6(_Float16 *base, vuint16mf2_t bindex, vfloat16m
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16m1x6
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_f16m1x6(_Float16 *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) {
@@ -40,7 +40,7 @@ void test_vsuxseg6ei16_v_f16m1x6(_Float16 *base, vuint16m1_t bindex, vfloat16m1x
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32mf2x6
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei16_v_f32mf2x6(float
*base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg6ei16_v_f32mf2x6(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f32m1x6(float *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg6ei16_v_f32m1x6(float *base, vuint16mf2_t bindex, vfloat32m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f64m1x6(double *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg6ei16_v_f64m1x6(double *base, vuint16mf4_t bindex, vfloat64m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf8x6(int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg6ei16_v_i8mf8x6(int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf4x6(int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg6ei16_v_i8mf4x6(int8_t 
*base, vuint16mf2_t bindex, vint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf2x6(int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg6ei16_v_i8mf2x6(int8_t *base, vuint16m1_t bindex, vint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8m1x6(int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg6ei16_v_i8m1x6(int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16mf4x6(int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg6ei16_v_i16mf4x6(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16mf2x6(int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg6ei16_v_i16mf2x6(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16m1x6 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16m1x6(int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg6ei16_v_i16m1x6(int16_t *base, vuint16m1_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i32mf2x6(int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg6ei16_v_i32mf2x6(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i32m1x6(int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg6ei16_v_i32m1x6(int32_t *base, vuint16mf2_t bindex, vint32m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i64m1x6(int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg6ei16_v_i64m1x6(int64_t *base, vuint16mf4_t bindex, vint64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf8x6(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg6ei16_v_u8mf8x6(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf4x6(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg6ei16_v_u8mf4x6(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf2x6(uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg6ei16_v_u8mf2x6(uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8m1x6(uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg6ei16_v_u8m1x6(uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16mf4x6(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg6ei16_v_u16mf4x6(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16mf2x6(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg6ei16_v_u16mf2x6(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16m1x6(uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg6ei16_v_u16m1x6(uint16_t *base, vuint16m1_t bindex, vuint16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u32mf2x6(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg6ei16_v_u32mf2x6(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u32m1x6(uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg6ei16_v_u32m1x6(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u64m1x6(uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg6ei16_v_u64m1x6(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg6ei16_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg6ei16_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg6ei16_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f32mf2x6_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg6ei16_v_f32mf2x6_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f32m1x6_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg6ei16_v_f32m1x6_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f64m1x6_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg6ei16_v_f64m1x6_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf8x6_m // CHECK-RV64-SAME: 
( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg6ei16_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg6ei16_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg6ei16_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ 
-370,7 +370,7 @@ void test_vsuxseg6ei16_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg6ei16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg6ei16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg6ei16_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg6ei16_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg6ei16_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg6ei16_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg6ei16_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg6ei16_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg6ei16_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg6ei16_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg6ei16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg6ei16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg6ei16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg6ei16_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg6ei16_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei32.c index fee5c6c59b7ac..63a982958382f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16mf4x6(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg6ei32_v_f16mf4x6(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16mf2x6(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg6ei32_v_f16mf2x6(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16m1x6(_Float16 *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg6ei32_v_f16m1x6(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f32mf2x6(float *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg6ei32_v_f32mf2x6(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f32m1x6(float *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg6ei32_v_f32m1x6(float *base, vuint32m1_t bindex, vfloat32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f64m1x6(double *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg6ei32_v_f64m1x6(double *base, vuint32mf2_t bindex, vfloat64m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8mf8x6(int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg6ei32_v_i8mf8x6(int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8mf4x6(int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg6ei32_v_i8mf4x6(int8_t *base, vuint32m1_t bindex, vint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8mf2x6(int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg6ei32_v_i8mf2x6(int8_t *base, vuint32m2_t bindex, vint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8m1x6(int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg6ei32_v_i8m1x6(int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16mf4x6(int16_t *base, 
vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg6ei32_v_i16mf4x6(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16mf2x6(int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg6ei32_v_i16mf2x6(int16_t *base, vuint32m1_t bindex, vint16mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16m1x6(int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg6ei32_v_i16m1x6(int16_t *base, vuint32m2_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i32mf2x6(int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg6ei32_v_i32mf2x6(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i32m1x6(int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void 
test_vsuxseg6ei32_v_i32m1x6(int32_t *base, vuint32m1_t bindex, vint32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i64m1x6(int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg6ei32_v_i64m1x6(int64_t *base, vuint32mf2_t bindex, vint64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf8x6(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg6ei32_v_u8mf8x6(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf4x6(uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg6ei32_v_u8mf4x6(uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf2x6(uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg6ei32_v_u8mf2x6(uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_ // CHECK-RV64-LABEL: define dso_local 
void @test_vsuxseg6ei32_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8m1x6(uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg6ei32_v_u8m1x6(uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16mf4x6(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg6ei32_v_u16mf4x6(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16mf2x6(uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg6ei32_v_u16mf2x6(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16m1x6(uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg6ei32_v_u16m1x6(uint16_t *base, vuint32m2_t bindex, vuint16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u32mf2x6(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg6ei32_v_u32mf2x6(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u32m1x6(uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg6ei32_v_u32m1x6(uint32_t *base, vuint32m1_t bindex, vuint32m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u64m1x6(uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg6ei32_v_u64m1x6(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg6ei32_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg6ei32_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg6ei32_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f32mf2x6_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg6ei32_v_f32mf2x6_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f32m1x6_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void 
test_vsuxseg6ei32_v_f32m1x6_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f64m1x6_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg6ei32_v_f64m1x6_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg6ei32_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg6ei32_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg6ei32_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg6ei32_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg6ei32_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg6ei32_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], 
i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg6ei32_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg6ei32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg6ei32_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg6ei32_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg6ei32_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg6ei32_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg6ei32_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg6ei32_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf4x6_m 
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg6ei32_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg6ei32_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg6ei32_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, 
vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg6ei32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg6ei32_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei64.c index 0e37256d7cbbd..968defd702556 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f16mf4x6(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg6ei64_v_f16mf4x6(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f16mf2x6(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg6ei64_v_f16mf2x6(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f16m1x6(_Float16 *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg6ei64_v_f16m1x6(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f32mf2x6(float *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg6ei64_v_f32mf2x6(float *base, vuint64m1_t bindex, vfloat32mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f32m1x6(float *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg6ei64_v_f32m1x6(float *base, vuint64m2_t bindex, vfloat32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f64m1x6(double *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg6ei64_v_f64m1x6(double *base, vuint64m1_t bindex, vfloat64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf8x6(int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg6ei64_v_i8mf8x6(int8_t *base, vuint64m1_t bindex, vint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf4x6(int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg6ei64_v_i8mf4x6(int8_t *base, vuint64m2_t bindex, vint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf2x6(int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg6ei64_v_i8mf2x6(int8_t *base, vuint64m4_t bindex, vint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8m1x6(int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg6ei64_v_i8m1x6(int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16mf4x6(int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg6ei64_v_i16mf4x6(int16_t *base, vuint64m1_t bindex, vint16mf4x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16mf2x6(int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg6ei64_v_i16mf2x6(int16_t *base, vuint64m2_t bindex, vint16mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16m1x6(int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg6ei64_v_i16m1x6(int16_t *base, vuint64m4_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i32mf2x6(int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg6ei64_v_i32mf2x6(int32_t *base, vuint64m1_t bindex, vint32mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i32m1x6(int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg6ei64_v_i32m1x6(int32_t *base, vuint64m2_t bindex, vint32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i64m1x6(int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg6ei64_v_i64m1x6(int64_t *base, vuint64m1_t bindex, vint64m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf8x6(uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg6ei64_v_u8mf8x6(uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf4x6(uint8_t 
*base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg6ei64_v_u8mf4x6(uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf2x6(uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg6ei64_v_u8mf2x6(uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8m1x6(uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg6ei64_v_u8m1x6(uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16mf4x6(uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg6ei64_v_u16mf4x6(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16mf2x6(uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void 
test_vsuxseg6ei64_v_u16mf2x6(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16m1x6(uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg6ei64_v_u16m1x6(uint16_t *base, vuint64m4_t bindex, vuint16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u32mf2x6(uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg6ei64_v_u32mf2x6(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u32m1x6(uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg6ei64_v_u32m1x6(uint32_t *base, vuint64m2_t bindex, vuint32m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u64m1x6(uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg6ei64_v_u64m1x6(uint64_t *base, vuint64m1_t bindex, vuint64m1x6 // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg6ei64_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg6ei64_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg6ei64_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg6ei64_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg6ei64_v_f32mf2x6_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg6ei64_v_f32mf2x6_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f32m1x6_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg6ei64_v_f32m1x6_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f64m1x6_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg6ei64_v_f64m1x6_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg6ei64_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg6ei64_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg6ei64_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg6ei64_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg6ei64_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg6ei64_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg6ei64_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg6ei64_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg6ei64_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i64m1x6_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg6ei64_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg6ei64_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg6ei64_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { 
@@ -460,7 +460,7 @@ void test_vsuxseg6ei64_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg6ei64_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg6ei64_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg6ei64_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg6ei64_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg6ei64_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg6ei64_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei8.c index 30080d103f1b3..895f5c8882eb5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg6ei8_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16mf4x6(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg6ei8_v_f16mf4x6(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16mf2x6(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg6ei8_v_f16mf2x6(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16m1x6(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg6ei8_v_f16m1x6(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f32mf2x6(float *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg6ei8_v_f32mf2x6(float *base, vuint8mf8_t bindex, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 
6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f32m1x6(float *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg6ei8_v_f32m1x6(float *base, vuint8mf4_t bindex, vfloat32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f64m1x6(double *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg6ei8_v_f64m1x6(double *base, vuint8mf8_t bindex, vfloat64m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8mf8x6(int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg6ei8_v_i8mf8x6(int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8mf4x6(int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg6ei8_v_i8mf4x6(int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8mf2x6(int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg6ei8_v_i8mf2x6(int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8m1x6(int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg6ei8_v_i8m1x6(int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i16mf4x6(int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg6ei8_v_i16mf4x6(int16_t *base, vuint8mf8_t bindex, vint16mf4x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i16mf2x6(int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg6ei8_v_i16mf2x6(int16_t *base, vuint8mf4_t bindex, vint16mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i16m1x6(int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg6ei8_v_i16m1x6(int16_t *base, vuint8mf2_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i32mf2x6(int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg6ei8_v_i32mf2x6(int32_t *base, vuint8mf8_t bindex, vint32mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i32m1x6(int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg6ei8_v_i32m1x6(int32_t *base, vuint8mf4_t bindex, vint32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i64m1x6(int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg6ei8_v_i64m1x6(int64_t *base, vuint8mf8_t bindex, vint64m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 
6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf8x6(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg6ei8_v_u8mf8x6(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf4x6(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg6ei8_v_u8mf4x6(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf2x6(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg6ei8_v_u8mf2x6(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8m1x6(uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg6ei8_v_u8m1x6(uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16mf4x6(uint16_t 
*base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg6ei8_v_u16mf4x6(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16mf2x6(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg6ei8_v_u16mf2x6(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16m1x6(uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg6ei8_v_u16m1x6(uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u32mf2x6(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg6ei8_v_u32mf2x6(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u32m1x6(uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg6ei8_v_u32m1x6(uint32_t 
*base, vuint8mf4_t bindex, vuint32m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u64m1x6(uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg6ei8_v_u64m1x6(uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg6ei8_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg6ei8_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16m1x6_m(vbool16_t mask, _Float16 *base, 
vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg6ei8_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f32mf2x6_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg6ei8_v_f32mf2x6_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f32m1x6_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg6ei8_v_f32m1x6_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f64m1x6_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg6ei8_v_f64m1x6_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg6ei8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg6ei8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg6ei8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg6ei8_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg6ei8_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg6ei8_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg6ei8_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg6ei8_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg6ei8_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg6ei8_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg6ei8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void 
test_vsuxseg6ei8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg6ei8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg6ei8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg6ei8_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 
4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg6ei8_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg6ei8_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg6ei8_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg6ei8_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg6ei8_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei16.c
index 5e034126e3890..6717a6ab568d5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei16.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf4x7
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_f16mf4x7(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
@@ -20,7 +20,7 @@ void test_vsuxseg7ei16_v_f16mf4x7(_Float16 *base, vuint16mf4_t bindex, vfloat16m
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf2x7
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_f16mf2x7(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
@@ -30,7 +30,7 @@ void test_vsuxseg7ei16_v_f16mf2x7(_Float16 *base, vuint16mf2_t bindex, vfloat16m
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16m1x7
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg7ei16_v_f16m1x7(_Float16 *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) {
@@ -40,7 +40,7 @@ void test_vsuxseg7ei16_v_f16m1x7(_Float16 *base, vuint16m1_t bindex, vfloat16m1x
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32mf2x7
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]],
target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f32mf2x7(float *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg7ei16_v_f32mf2x7(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f32m1x7(float *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg7ei16_v_f32m1x7(float *base, vuint16mf2_t bindex, vfloat32m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f64m1x7(double *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg7ei16_v_f64m1x7(double *base, vuint16mf4_t bindex, vfloat64m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf8x7(int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg7ei16_v_i8mf8x7(int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf4x7(int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg7ei16_v_i8mf4x7(int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf2x7(int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg7ei16_v_i8mf2x7(int8_t *base, vuint16m1_t bindex, vint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8m1x7(int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg7ei16_v_i8m1x7(int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i16mf4x7(int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg7ei16_v_i16mf4x7(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i16mf2x7(int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg7ei16_v_i16mf2x7(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i16m1x7(int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg7ei16_v_i16m1x7(int16_t *base, vuint16m1_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i32mf2x7(int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg7ei16_v_i32mf2x7(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i32m1x7(int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg7ei16_v_i32m1x7(int32_t *base, vuint16mf2_t bindex, vint32m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i64m1x7(int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg7ei16_v_i64m1x7(int64_t *base, vuint16mf4_t bindex, vint64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8mf8x7(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg7ei16_v_u8mf8x7(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8mf4x7(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg7ei16_v_u8mf4x7(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8mf2x7(uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg7ei16_v_u8mf2x7(uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8m1x7(uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg7ei16_v_u8m1x7(uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16mf4x7(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg7ei16_v_u16mf4x7(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16mf2x7(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg7ei16_v_u16mf2x7(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16m1x7(uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg7ei16_v_u16m1x7(uint16_t *base, vuint16m1_t bindex, vuint16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg7ei16_v_u32mf2x7(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg7ei16_v_u32mf2x7(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u32m1x7(uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg7ei16_v_u32m1x7(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u64m1x7(uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg7ei16_v_u64m1x7(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg7ei16_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) 
// CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg7ei16_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg7ei16_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f32mf2x7_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg7ei16_v_f32mf2x7_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f32m1x7_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg7ei16_v_f32m1x7_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f64m1x7_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg7ei16_v_f64m1x7_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg7ei16_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg7ei16_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg7ei16_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg7ei16_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg7ei16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg7ei16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg7ei16_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32mf2x7_m 
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg7ei16_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg7ei16_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg7ei16_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, 
vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg7ei16_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg7ei16_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg7ei16_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg7ei16_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg7ei16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg7ei16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg7ei16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg7ei16_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg7ei16_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei32.c index a260ee0abfa14..d4f9a324d55c2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16mf4x7(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg7ei32_v_f16mf4x7(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16mf2x7(_Float16 *base, vuint32m1_t bindex, 
vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg7ei32_v_f16mf2x7(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16m1x7(_Float16 *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg7ei32_v_f16m1x7(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f32mf2x7(float *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg7ei32_v_f32mf2x7(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f32m1x7(float *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg7ei32_v_f32m1x7(float *base, vuint32m1_t bindex, vfloat32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f64m1x7(double *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg7ei32_v_f64m1x7(double *base, vuint32mf2_t 
bindex, vfloat64m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8mf8x7(int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg7ei32_v_i8mf8x7(int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8mf4x7(int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg7ei32_v_i8mf4x7(int8_t *base, vuint32m1_t bindex, vint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8mf2x7(int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg7ei32_v_i8mf2x7(int8_t *base, vuint32m2_t bindex, vint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8m1x7(int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg7ei32_v_i8m1x7(int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16mf4x7(int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg7ei32_v_i16mf4x7(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16mf2x7(int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg7ei32_v_i16mf2x7(int16_t *base, vuint32m1_t bindex, vint16mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16m1x7(int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg7ei32_v_i16m1x7(int16_t *base, vuint32m2_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i32mf2x7(int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg7ei32_v_i32mf2x7(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i32m1x7(int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg7ei32_v_i32m1x7(int32_t *base, vuint32m1_t bindex, vint32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i64m1x7(int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg7ei32_v_i64m1x7(int64_t *base, vuint32mf2_t bindex, vint64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf8x7(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg7ei32_v_u8mf8x7(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf4x7(uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg7ei32_v_u8mf4x7(uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf2x7(uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg7ei32_v_u8mf2x7(uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8m1x7(uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg7ei32_v_u8m1x7(uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16mf4x7(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg7ei32_v_u16mf4x7(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16mf2x7(uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg7ei32_v_u16mf2x7(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16m1x7(uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg7ei32_v_u16m1x7(uint16_t *base, vuint32m2_t bindex, vuint16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u32mf2x7(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg7ei32_v_u32mf2x7(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u32m1x7(uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg7ei32_v_u32m1x7(uint32_t *base, vuint32m1_t bindex, vuint32m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u64m1x7(uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg7ei32_v_u64m1x7(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg7ei32_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg7ei32_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg7ei32_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f32mf2x7_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg7ei32_v_f32mf2x7_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f32m1x7_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg7ei32_v_f32m1x7_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f64m1x7_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg7ei32_v_f64m1x7_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg7ei32_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg7ei32_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf2x7_m // CHECK-RV64-SAME: 
( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg7ei32_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg7ei32_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg7ei32_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) { 
@@ -390,7 +390,7 @@ void test_vsuxseg7ei32_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg7ei32_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg7ei32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg7ei32_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg7ei32_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg7ei32_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg7ei32_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg7ei32_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg7ei32_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg7ei32_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg7ei32_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg7ei32_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg7ei32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg7ei32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei64.c index 02496947a0a3d..431459dc548fe 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16mf4x7(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg7ei64_v_f16mf4x7(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16mf2x7(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg7ei64_v_f16mf2x7(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16m1x7(_Float16 *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg7ei64_v_f16m1x7(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f32mf2x7(float *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg7ei64_v_f32mf2x7(float *base, vuint64m1_t bindex, vfloat32mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f32m1x7(float 
*base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg7ei64_v_f32m1x7(float *base, vuint64m2_t bindex, vfloat32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f64m1x7(double *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg7ei64_v_f64m1x7(double *base, vuint64m1_t bindex, vfloat64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf8x7(int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg7ei64_v_i8mf8x7(int8_t *base, vuint64m1_t bindex, vint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf4x7(int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg7ei64_v_i8mf4x7(int8_t *base, vuint64m2_t bindex, vint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf2x7(int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg7ei64_v_i8mf2x7(int8_t *base, 
vuint64m4_t bindex, vint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8m1x7(int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg7ei64_v_i8m1x7(int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i16mf4x7(int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg7ei64_v_i16mf4x7(int16_t *base, vuint64m1_t bindex, vint16mf4x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i16mf2x7(int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg7ei64_v_i16mf2x7(int16_t *base, vuint64m2_t bindex, vint16mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i16m1x7(int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg7ei64_v_i16m1x7(int16_t *base, vuint64m4_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32mf2x7 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i32mf2x7(int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg7ei64_v_i32mf2x7(int32_t *base, vuint64m1_t bindex, vint32mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i32m1x7(int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg7ei64_v_i32m1x7(int32_t *base, vuint64m2_t bindex, vint32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i64m1x7(int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg7ei64_v_i64m1x7(int64_t *base, vuint64m1_t bindex, vint64m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf8x7(uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg7ei64_v_u8mf8x7(uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf4x7(uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg7ei64_v_u8mf4x7(uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf2x7(uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg7ei64_v_u8mf2x7(uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8m1x7(uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg7ei64_v_u8m1x7(uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16mf4x7(uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg7ei64_v_u16mf4x7(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16mf2x7(uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg7ei64_v_u16mf2x7(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16m1x7(uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg7ei64_v_u16m1x7(uint16_t *base, vuint64m4_t bindex, vuint16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u32mf2x7(uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg7ei64_v_u32mf2x7(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u32m1x7(uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg7ei64_v_u32m1x7(uint32_t *base, vuint64m2_t bindex, vuint32m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u64m1x7(uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg7ei64_v_u64m1x7(uint64_t *base, vuint64m1_t bindex, vuint64m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg7ei64_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg7ei64_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg7ei64_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f32mf2x7_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg7ei64_v_f32mf2x7_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f32m1x7_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg7ei64_v_f32m1x7_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f64m1x7_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg7ei64_v_f64m1x7_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg7ei64_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg7ei64_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg7ei64_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg7ei64_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg7ei64_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i16mf4x7_m(vbool64_t mask, int16_t *base, 
vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg7ei64_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg7ei64_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg7ei64_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg7ei64_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg7ei64_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg7ei64_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg7ei64_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg7ei64_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg7ei64_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg7ei64_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg7ei64_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg7ei64_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16m1x7_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg7ei64_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg7ei64_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg7ei64_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t 
vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei8.c index 91dbe997e86fd..7570129544bb0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16mf4x7(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg7ei8_v_f16mf4x7(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16mf2x7(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg7ei8_v_f16mf2x7(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16m1x7(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg7ei8_v_f16m1x7(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f32mf2x7(float *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg7ei8_v_f32mf2x7(float *base, vuint8mf8_t bindex, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f32m1x7(float *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg7ei8_v_f32m1x7(float *base, vuint8mf4_t bindex, vfloat32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f64m1x7(double *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg7ei8_v_f64m1x7(double *base, vuint8mf8_t bindex, vfloat64m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf8x7(int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg7ei8_v_i8mf8x7(int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf4x7(int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg7ei8_v_i8mf4x7(int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf2x7(int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg7ei8_v_i8mf2x7(int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8m1x7(int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg7ei8_v_i8m1x7(int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16mf4x7(int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg7ei8_v_i16mf4x7(int16_t *base, vuint8mf8_t bindex, vint16mf4x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16mf2x7(int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, 
size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg7ei8_v_i16mf2x7(int16_t *base, vuint8mf4_t bindex, vint16mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16m1x7(int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg7ei8_v_i16m1x7(int16_t *base, vuint8mf2_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i32mf2x7(int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg7ei8_v_i32mf2x7(int32_t *base, vuint8mf8_t bindex, vint32mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i32m1x7(int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg7ei8_v_i32m1x7(int32_t *base, vuint8mf4_t bindex, vint32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i64m1x7(int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg7ei8_v_i64m1x7(int64_t *base, vuint8mf8_t bindex, vint64m1x7_t // CHECK-RV64-LABEL: 
define dso_local void @test_vsuxseg7ei8_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8mf8x7(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg7ei8_v_u8mf8x7(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8mf4x7(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg7ei8_v_u8mf4x7(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8mf2x7(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg7ei8_v_u8mf2x7(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8m1x7(uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg7ei8_v_u8m1x7(uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16mf4x7(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg7ei8_v_u16mf4x7(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16mf2x7(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg7ei8_v_u16mf2x7(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16m1x7(uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg7ei8_v_u16m1x7(uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u32mf2x7(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg7ei8_v_u32mf2x7(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u32m1x7(uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg7ei8_v_u32m1x7(uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u64m1x7(uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg7ei8_v_u64m1x7(uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg7ei8_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg7ei8_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg7ei8_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f32mf2x7_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg7ei8_v_f32mf2x7_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f32m1x7_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg7ei8_v_f32m1x7_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f64m1x7_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg7ei8_v_f64m1x7_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg7ei8_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg7ei8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg7ei8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg7ei8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint8m1_t 
bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg7ei8_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg7ei8_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg7ei8_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg7ei8_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg7ei8_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg7ei8_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg7ei8_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg7ei8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg7ei8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg7ei8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg7ei8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg7ei8_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg7ei8_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg7ei8_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg7ei8_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ 
void test_vsuxseg7ei8_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei16.c index e84b9308f6f15..06997ebfb67aa 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16mf4x8(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg8ei16_v_f16mf4x8(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16mf2x8(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg8ei16_v_f16mf2x8(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16m1x8(_Float16 *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg8ei16_v_f16m1x8(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f32mf2x8(float *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg8ei16_v_f32mf2x8(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f32m1x8(float *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg8ei16_v_f32m1x8(float *base, vuint16mf2_t bindex, vfloat32m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f64m1x8(double *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg8ei16_v_f64m1x8(double *base, vuint16mf4_t bindex, vfloat64m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 
8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf8x8(int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg8ei16_v_i8mf8x8(int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf4x8(int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg8ei16_v_i8mf4x8(int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf2x8(int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg8ei16_v_i8mf2x8(int8_t *base, vuint16m1_t bindex, vint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8m1x8(int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg8ei16_v_i8m1x8(int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg8ei16_v_i16mf4x8(int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg8ei16_v_i16mf4x8(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i16mf2x8(int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg8ei16_v_i16mf2x8(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i16m1x8(int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg8ei16_v_i16m1x8(int16_t *base, vuint16m1_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i32mf2x8(int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg8ei16_v_i32mf2x8(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i32m1x8(int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 
+160,7 @@ void test_vsuxseg8ei16_v_i32m1x8(int32_t *base, vuint16mf2_t bindex, vint32m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i64m1x8(int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg8ei16_v_i64m1x8(int64_t *base, vuint16mf4_t bindex, vint64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf8x8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg8ei16_v_u8mf8x8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf4x8(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg8ei16_v_u8mf4x8(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf2x8(uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg8ei16_v_u8mf2x8(uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_ // CHECK-RV64-LABEL: 
define dso_local void @test_vsuxseg8ei16_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8m1x8(uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg8ei16_v_u8m1x8(uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16mf4x8(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg8ei16_v_u16mf4x8(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16mf2x8(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg8ei16_v_u16mf2x8(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16m1x8(uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg8ei16_v_u16m1x8(uint16_t *base, vuint16m1_t bindex, vuint16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u32mf2x8(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg8ei16_v_u32mf2x8(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u32m1x8(uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg8ei16_v_u32m1x8(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u64m1x8(uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg8ei16_v_u64m1x8(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg8ei16_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg8ei16_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg8ei16_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f32mf2x8_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg8ei16_v_f32mf2x8_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f32m1x8_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void 
test_vsuxseg8ei16_v_f32m1x8_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f64m1x8_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg8ei16_v_f64m1x8_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg8ei16_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg8ei16_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg8ei16_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg8ei16_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg8ei16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg8ei16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], 
i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg8ei16_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg8ei16_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg8ei16_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg8ei16_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg8ei16_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg8ei16_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg8ei16_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg8ei16_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg8ei16_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg8ei16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg8ei16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg8ei16_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u32mf2x8_m(vbool64_t mask, 
uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg8ei16_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg8ei16_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei32.c index 02df0ac48ed0d..40fe2b93b639a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16mf4x8(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg8ei32_v_f16mf4x8(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16mf2x8(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg8ei32_v_f16mf2x8(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16m1x8(_Float16 *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg8ei32_v_f16m1x8(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f32mf2x8(float *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg8ei32_v_f32mf2x8(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f32m1x8(float *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg8ei32_v_f32m1x8(float *base, vuint32m1_t bindex, vfloat32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", 
, 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f64m1x8(double *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg8ei32_v_f64m1x8(double *base, vuint32mf2_t bindex, vfloat64m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf8x8(int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg8ei32_v_i8mf8x8(int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf4x8(int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg8ei32_v_i8mf4x8(int8_t *base, vuint32m1_t bindex, vint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf2x8(int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg8ei32_v_i8mf2x8(int8_t *base, vuint32m2_t bindex, vint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8m1x8(int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg8ei32_v_i8m1x8(int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16mf4x8(int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg8ei32_v_i16mf4x8(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16mf2x8(int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg8ei32_v_i16mf2x8(int16_t *base, vuint32m1_t bindex, vint16mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16m1x8(int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg8ei32_v_i16m1x8(int16_t *base, vuint32m2_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i32mf2x8(int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg8ei32_v_i32mf2x8(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i32m1x8(int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg8ei32_v_i32m1x8(int32_t *base, vuint32m1_t bindex, vint32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i64m1x8(int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg8ei32_v_i64m1x8(int64_t *base, vuint32mf2_t bindex, vint64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf8x8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg8ei32_v_u8mf8x8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf4x8(uint8_t 
*base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg8ei32_v_u8mf4x8(uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf2x8(uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg8ei32_v_u8mf2x8(uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8m1x8(uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg8ei32_v_u8m1x8(uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u16mf4x8(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg8ei32_v_u16mf4x8(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u16mf2x8(uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void 
test_vsuxseg8ei32_v_u16mf2x8(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u16m1x8(uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg8ei32_v_u16m1x8(uint16_t *base, vuint32m2_t bindex, vuint16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u32mf2x8(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg8ei32_v_u32mf2x8(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u32m1x8(uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg8ei32_v_u32m1x8(uint32_t *base, vuint32m1_t bindex, vuint32m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u64m1x8(uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg8ei32_v_u64m1x8(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg8ei32_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg8ei32_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg8ei32_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg8ei32_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg8ei32_v_f32mf2x8_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg8ei32_v_f32mf2x8_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f32m1x8_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg8ei32_v_f32m1x8_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f64m1x8_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg8ei32_v_f64m1x8_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg8ei32_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg8ei32_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg8ei32_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg8ei32_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg8ei32_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg8ei32_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg8ei32_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg8ei32_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg8ei32_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i64m1x8_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg8ei32_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg8ei32_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg8ei32_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) 
{ @@ -460,7 +460,7 @@ void test_vsuxseg8ei32_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg8ei32_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg8ei32_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg8ei32_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg8ei32_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg8ei32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg8ei32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei64.c index 144f31d8797bf..4d3deaa462c40 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg8ei64_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f16mf4x8(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg8ei64_v_f16mf4x8(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f16mf2x8(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg8ei64_v_f16mf2x8(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f16m1x8(_Float16 *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg8ei64_v_f16m1x8(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f32mf2x8(float *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg8ei64_v_f32mf2x8(float *base, vuint64m1_t bindex, vfloat32mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f32m1x8(float *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg8ei64_v_f32m1x8(float *base, vuint64m2_t bindex, vfloat32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f64m1x8(double *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg8ei64_v_f64m1x8(double *base, vuint64m1_t bindex, vfloat64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf8x8(int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg8ei64_v_i8mf8x8(int8_t *base, vuint64m1_t bindex, vint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf4x8(int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg8ei64_v_i8mf4x8(int8_t *base, vuint64m2_t bindex, vint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf2x8(int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg8ei64_v_i8mf2x8(int8_t *base, vuint64m4_t bindex, vint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8m1x8(int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg8ei64_v_i8m1x8(int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16mf4x8(int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg8ei64_v_i16mf4x8(int16_t *base, vuint64m1_t bindex, vint16mf4x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16mf2x8(int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg8ei64_v_i16mf2x8(int16_t *base, vuint64m2_t bindex, vint16mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 
8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16m1x8(int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg8ei64_v_i16m1x8(int16_t *base, vuint64m4_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i32mf2x8(int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg8ei64_v_i32mf2x8(int32_t *base, vuint64m1_t bindex, vint32mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i32m1x8(int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg8ei64_v_i32m1x8(int32_t *base, vuint64m2_t bindex, vint32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i64m1x8(int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg8ei64_v_i64m1x8(int64_t *base, vuint64m1_t bindex, vint64m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8mf8x8(uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg8ei64_v_u8mf8x8(uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8mf4x8(uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg8ei64_v_u8mf4x8(uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8mf2x8(uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg8ei64_v_u8mf2x8(uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8m1x8(uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg8ei64_v_u8m1x8(uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16mf4x8(uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg8ei64_v_u16mf4x8(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16mf2x8(uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg8ei64_v_u16mf2x8(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16m1x8(uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg8ei64_v_u16m1x8(uint16_t *base, vuint64m4_t bindex, vuint16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u32mf2x8(uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg8ei64_v_u32mf2x8(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg8ei64_v_u32m1x8(uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg8ei64_v_u32m1x8(uint32_t *base, vuint64m2_t bindex, vuint32m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u64m1x8(uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg8ei64_v_u64m1x8(uint64_t *base, vuint64m1_t bindex, vuint64m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg8ei64_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg8ei64_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", 
, 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg8ei64_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f32mf2x8_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg8ei64_v_f32mf2x8_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f32m1x8_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg8ei64_v_f32m1x8_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f64m1x8_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg8ei64_v_f64m1x8_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg8ei64_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg8ei64_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg8ei64_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg8ei64_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg8ei64_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg8ei64_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg8ei64_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg8ei64_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint64m1_t b // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg8ei64_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg8ei64_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg8ei64_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg8ei64_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg8ei64_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg8ei64_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg8ei64_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg8ei64_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg8ei64_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg8ei64_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg8ei64_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg8ei64_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei8.c index f84c43147b415..ffe23453e0e38 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f16mf4x8(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg8ei8_v_f16mf4x8(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f16mf2x8(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg8ei8_v_f16mf2x8(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f16m1x8(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg8ei8_v_f16m1x8(_Float16 *base, 
vuint8mf2_t bindex, vfloat16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f32mf2x8(float *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg8ei8_v_f32mf2x8(float *base, vuint8mf8_t bindex, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f32m1x8(float *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg8ei8_v_f32m1x8(float *base, vuint8mf4_t bindex, vfloat32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f64m1x8(double *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg8ei8_v_f64m1x8(double *base, vuint8mf8_t bindex, vfloat64m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8mf8x8(int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg8ei8_v_i8mf8x8(int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8mf4x8(int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg8ei8_v_i8mf4x8(int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8mf2x8(int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg8ei8_v_i8mf2x8(int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8m1x8(int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg8ei8_v_i8m1x8(int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16mf4x8(int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg8ei8_v_i16mf4x8(int16_t *base, vuint8mf8_t bindex, vint16mf4x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16mf2x8(int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg8ei8_v_i16mf2x8(int16_t *base, vuint8mf4_t bindex, vint16mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16m1x8(int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg8ei8_v_i16m1x8(int16_t *base, vuint8mf2_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i32mf2x8(int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg8ei8_v_i32mf2x8(int32_t *base, vuint8mf8_t bindex, vint32mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i32m1x8(int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg8ei8_v_i32m1x8(int32_t *base, vuint8mf4_t bindex, vint32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i64m1x8(int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg8ei8_v_i64m1x8(int64_t *base, vuint8mf8_t bindex, vint64m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf8x8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg8ei8_v_u8mf8x8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf4x8(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg8ei8_v_u8mf4x8(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf2x8(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg8ei8_v_u8mf2x8(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8m1x8(uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg8ei8_v_u8m1x8(uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16mf4x8(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg8ei8_v_u16mf4x8(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16mf2x8(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg8ei8_v_u16mf2x8(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16m1x8(uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg8ei8_v_u16m1x8(uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u32mf2x8(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg8ei8_v_u32mf2x8(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u32m1x8(uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg8ei8_v_u32m1x8(uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u64m1x8(uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg8ei8_v_u64m1x8(uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg8ei8_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg8ei8_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg8ei8_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f32mf2x8_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg8ei8_v_f32mf2x8_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f32m1x8_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg8ei8_v_f32m1x8_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f64m1x8_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg8ei8_v_f64m1x8_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg8ei8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg8ei8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg8ei8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg8ei8_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg8ei8_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg8ei8_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg8ei8_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg8ei8_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg8ei8_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg8ei8_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg8ei8_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t 
bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg8ei8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg8ei8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg8ei8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg8ei8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg8ei8_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg8ei8_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg8ei8_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg8ei8_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg8ei8_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg2ei16.c index b8072943b4cc7..cbeabc47ea9cc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg2ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2(const __bf16 *rs1, @@ -33,7 +33,7 @@ 
vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m1x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2(const __bf16 *rs1, vuint16m2_t rs2, @@ -55,7 +55,7 @@ vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2(const __bf16 *rs1, vuint16m2_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2(const __bf16 *rs1, vuint16m4_t rs2, @@ -66,7 +66,7 @@ vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2(const __bf16 *rs1, vuint16m4_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, 
i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, @@ -78,7 +78,7 @@ vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, @@ -90,7 +90,7 @@ vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m1x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, @@ -101,7 +101,7 @@ vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, @@ -112,7 +112,7 @@ vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg3ei16.c index 1a803532613b2..5ec3fa5dc2a87 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg3ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf4x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m1x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3(const __bf16 *rs1, vuint16m2_t rs2, @@ -55,7 +55,7 @@ vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3(const __bf16 *rs1, vuint16m2_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf4x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, @@ -67,7 +67,7 @@ vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, @@ -79,7 +79,7 @@ vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m1x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, @@ -90,7 +90,7 @@ vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg4ei16.c index 181cf80e7a161..c2308e33763b2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg4ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf4x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4(const __bf16 *rs1, @@ 
-33,7 +33,7 @@ vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m1x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4(const __bf16 *rs1, vuint16m2_t rs2, @@ -55,7 +55,7 @@ vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4(const __bf16 *rs1, vuint16m2_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf4x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, @@ -67,7 +67,7 @@ vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr 
[[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, @@ -79,7 +79,7 @@ vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m1x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, @@ -90,7 +90,7 @@ vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg5ei16.c index 2af306751af59..586df70881a0b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg5ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf4x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf2x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16m1x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf4x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf2x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t 
test_vloxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16m1x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg6ei16.c index f4f95732abdda..d747a3dd9f9dc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg6ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf4x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf2x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16m1x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf4x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf2x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16m1x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg7ei16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg7ei16.c index f7b4b267ba482..6066ccc25fb60 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg7ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf4x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf2x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16m1x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf4x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf2x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16m1x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg8ei16.c index 26a33b540929b..1e0a1c2f3f03b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vloxseg8ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf4x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t 
test_vloxseg8ei16_v_bf16mf4x8(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf2x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16m1x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf4x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf2x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", 
, 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16m1x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg2e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg2e16ff.c index 8218915b89378..5fa5f9d9e4bff 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg2e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg2e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -25,7 +25,7 @@ vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_m(vbool64_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -39,7 +39,7 @@ vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_m(vbool32_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m1x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -53,7 +53,7 @@ vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -67,7 +67,7 @@ vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg3e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg3e16ff.c index 59a87ed655780..435df4bfd378b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg3e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg3e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf4x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -25,7 +25,7 @@ vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_m(vbool64_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -39,7 +39,7 @@ vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_m(vbool32_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m1x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] 
= extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -53,7 +53,7 @@ vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg4e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg4e16ff.c index fb94ee68b5d44..90519c8d0e0b2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg4e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg4e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf4x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -25,7 +25,7 @@ vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_m(vbool64_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 
3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -39,7 +39,7 @@ vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_m(vbool32_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m1x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -53,7 +53,7 @@ vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg5e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg5e16ff.c index f0e7387f7864d..0cf9b49f58e22 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg5e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg5e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf4x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } 
@llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -25,7 +25,7 @@ vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_m(vbool64_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf2x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -39,7 +39,7 @@ vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_m(vbool32_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16m1x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg6e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg6e16ff.c index 0fe3fd05d682e..61ccdcd86c8bd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg6e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg6e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf4x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", 
, 6) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -25,7 +25,7 @@ vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_m(vbool64_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf2x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -39,7 +39,7 @@ vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_m(vbool32_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16m1x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg7e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg7e16ff.c index 2679e8ea8b168..d9e6de09d7ec2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg7e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg7e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf4x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -25,7 +25,7 @@ vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_m(vbool64_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf2x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -39,7 +39,7 @@ vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_m(vbool32_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16m1x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg8e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg8e16ff.c index 3dc2d5489a7ef..d68f653872f4b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg8e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vlseg8e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf4x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef 
[[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -25,7 +25,7 @@ vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_m(vbool64_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf2x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -39,7 +39,7 @@ vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_m(vbool32_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16m1x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg2ei16.c index 64397c5e4d36a..91138f23a484b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg2ei16.c @@ -11,7 +11,7 @@ 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m1x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2(const __bf16 *rs1, vuint16m2_t rs2, @@ -55,7 +55,7 @@ vbfloat16m2x2_t 
test_vluxseg2ei16_v_bf16m2x2(const __bf16 *rs1, vuint16m2_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2(const __bf16 *rs1, vuint16m4_t rs2, @@ -66,7 +66,7 @@ vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2(const __bf16 *rs1, vuint16m4_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, @@ -78,7 +78,7 @@ vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, @@ -90,7 +90,7 @@ vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m1x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 
2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, @@ -101,7 +101,7 @@ vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, @@ -112,7 +112,7 @@ vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg3ei16.c index 9d5d8286440e0..eccc7bf827769 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg3ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf4x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x3_t 
test_vluxseg3ei16_v_bf16mf4x3(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m1x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3(const __bf16 *rs1, vuint16m2_t rs2, @@ -55,7 +55,7 @@ vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3(const __bf16 *rs1, vuint16m2_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf4x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, @@ -67,7 +67,7 @@ vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, @@ -79,7 +79,7 @@ vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m1x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, @@ -90,7 +90,7 @@ vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg4ei16.c index 3cc14a562b561..0a8f9fde504b2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg4ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf4x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m1x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4(const __bf16 *rs1, vuint16m2_t rs2, @@ -55,7 +55,7 @@ vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4(const 
__bf16 *rs1, vuint16m2_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf4x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, @@ -67,7 +67,7 @@ vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, @@ -79,7 +79,7 @@ vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m1x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, @@ -90,7 +90,7 @@ vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg5ei16.c index b9f7474400121..1b38c8baf740e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg5ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf4x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf2x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16m1x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf4x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf2x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16m1x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg6ei16.c index 0582eed3d29c6..194bfcee9ef75 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg6ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf4x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf2x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16m1x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf4x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf2x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16m1x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg7ei16.c index c98764ac0bca3..d20db96d60d78 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg7ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf4x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf2x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16m1x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf4x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf2x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16m1x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg8ei16.c index 2cc55d016c375..6826edda32946 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vluxseg8ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf4x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8(const __bf16 *rs1, @@ -22,7 +22,7 @@ vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf2x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8(const __bf16 *rs1, @@ -33,7 +33,7 @@ vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8(const __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16m1x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8(const __bf16 *rs1, vuint16m1_t rs2, @@ -44,7 +44,7 @@ vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8(const __bf16 *rs1, vuint16m1_t rs2, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf4x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, @@ -56,7 +56,7 @@ vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf2x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, @@ -68,7 +68,7 @@ vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16m1x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg2ei16.c index f96a74ae467c5..710bde9c51595 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg2ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg2ei16_v_bf16mf4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16mf4x2(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsoxseg2ei16_v_bf16mf4x2(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16mf2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16mf2x2(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsoxseg2ei16_v_bf16mf2x2(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16m1x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16m1x2(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsoxseg2ei16_v_bf16m1x2(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16m2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16m2x2(__bf16 *rs1, vuint16m2_t vs2, @@ -55,7 +55,7 @@ void test_vsoxseg2ei16_v_bf16m2x2(__bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16m4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16m4x2(__bf16 *rs1, vuint16m4_t vs2, @@ -66,7 +66,7 @@ void test_vsoxseg2ei16_v_bf16m4x2(__bf16 *rs1, vuint16m4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16mf4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, @@ -78,7 +78,7 @@ void test_vsoxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16mf2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, @@ -90,7 +90,7 @@ void test_vsoxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16m1x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, @@ -101,7 +101,7 @@ void test_vsoxseg2ei16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16m2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg2ei16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, @@ -112,7 +112,7 @@ void test_vsoxseg2ei16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_bf16m4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, vuint16m4_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg3ei16.c index 04a2b81fd0ac3..962b1f4f92a9a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg3ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16mf4x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16mf4x3(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsoxseg3ei16_v_bf16mf4x3(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16mf2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16mf2x3(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsoxseg3ei16_v_bf16mf2x3(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16m1x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) 
[[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16m1x3(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsoxseg3ei16_v_bf16m1x3(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16m2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16m2x3(__bf16 *rs1, vuint16m2_t vs2, @@ -55,7 +55,7 @@ void test_vsoxseg3ei16_v_bf16m2x3(__bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16mf4x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, @@ -67,7 +67,7 @@ void test_vsoxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16mf2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, @@ -79,7 +79,7 @@ void test_vsoxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_bf16m1x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, @@ -90,7 +90,7 @@ void test_vsoxseg3ei16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: 
define dso_local void @test_vsoxseg3ei16_v_bf16m2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg4ei16.c index 35b24d5d2027f..d34f441a2b92a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg4ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16mf4x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16mf4x4(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsoxseg4ei16_v_bf16mf4x4(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16mf2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16mf2x4(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsoxseg4ei16_v_bf16mf2x4(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16m1x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16m1x4(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void 
test_vsoxseg4ei16_v_bf16m1x4(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16m2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16m2x4(__bf16 *rs1, vuint16m2_t vs2, @@ -55,7 +55,7 @@ void test_vsoxseg4ei16_v_bf16m2x4(__bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16mf4x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, @@ -67,7 +67,7 @@ void test_vsoxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16mf2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, @@ -79,7 +79,7 @@ void test_vsoxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16m1x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, @@ -90,7 +90,7 @@ void test_vsoxseg4ei16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_bf16m2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) 
[[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg5ei16.c index f79ad6ed38105..a2a0da23de8ab 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg5ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_bf16mf4x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_bf16mf4x5(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsoxseg5ei16_v_bf16mf4x5(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_bf16mf2x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_bf16mf2x5(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsoxseg5ei16_v_bf16mf2x5(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_bf16m1x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_bf16m1x5(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsoxseg5ei16_v_bf16m1x5(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_bf16mf4x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], 
[[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsoxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_bf16mf2x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsoxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_bf16m1x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg6ei16.c index 71a02db956d73..874e2213bc1cd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg6ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_bf16mf4x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_bf16mf4x6(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsoxseg6ei16_v_bf16mf4x6(__bf16 *rs1, 
vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_bf16mf2x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_bf16mf2x6(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsoxseg6ei16_v_bf16mf2x6(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_bf16m1x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_bf16m1x6(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsoxseg6ei16_v_bf16m1x6(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_bf16mf4x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsoxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_bf16mf2x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsoxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_bf16m1x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg7ei16.c index 1d0e55a4c9b26..092feb27eea36 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg7ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_bf16mf4x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_bf16mf4x7(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsoxseg7ei16_v_bf16mf4x7(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_bf16mf2x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_bf16mf2x7(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsoxseg7ei16_v_bf16mf2x7(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_bf16m1x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_bf16m1x7(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsoxseg7ei16_v_bf16m1x7(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_bf16mf4x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsoxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_bf16mf2x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsoxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_bf16m1x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg8ei16.c index cc28d61289d01..06b6a374fedc9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsoxseg8ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_bf16mf4x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_bf16mf4x8(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsoxseg8ei16_v_bf16mf4x8(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_bf16mf2x8( // 
CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_bf16mf2x8(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsoxseg8ei16_v_bf16mf2x8(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_bf16m1x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_bf16m1x8(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsoxseg8ei16_v_bf16m1x8(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_bf16mf4x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsoxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_bf16mf2x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsoxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_bf16m1x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr 
[[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg2ei16.c index dd2aa78fe43af..35530a70e0429 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg2ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16mf4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16mf4x2(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsuxseg2ei16_v_bf16mf4x2(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16mf2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16mf2x2(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsuxseg2ei16_v_bf16mf2x2(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16m1x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16m1x2(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsuxseg2ei16_v_bf16m1x2(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16m2x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16m2x2(__bf16 *rs1, vuint16m2_t vs2, @@ -55,7 +55,7 @@ void test_vsuxseg2ei16_v_bf16m2x2(__bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16m4x2( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16m4x2(__bf16 *rs1, vuint16m4_t vs2, @@ -66,7 +66,7 @@ void test_vsuxseg2ei16_v_bf16m4x2(__bf16 *rs1, vuint16m4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16mf4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, @@ -78,7 +78,7 @@ void test_vsuxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16mf2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, @@ -90,7 +90,7 @@ void test_vsuxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16m1x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], 
ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, @@ -101,7 +101,7 @@ void test_vsuxseg2ei16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16m2x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, @@ -112,7 +112,7 @@ void test_vsuxseg2ei16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_bf16m4x2_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, vuint16m4_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg3ei16.c index d18dd779b0e4f..a6a88aad08908 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg3ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16mf4x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16mf4x3(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsuxseg3ei16_v_bf16mf4x3(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16mf2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], 
ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16mf2x3(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsuxseg3ei16_v_bf16mf2x3(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16m1x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16m1x3(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsuxseg3ei16_v_bf16m1x3(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16m2x3( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16m2x3(__bf16 *rs1, vuint16m2_t vs2, @@ -55,7 +55,7 @@ void test_vsuxseg3ei16_v_bf16m2x3(__bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16mf4x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, @@ -67,7 +67,7 @@ void test_vsuxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16mf2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, @@ -79,7 +79,7 
@@ void test_vsuxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16m1x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, @@ -90,7 +90,7 @@ void test_vsuxseg3ei16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_bf16m2x3_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg4ei16.c index 82b70986cac0d..0e81863acecaa 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg4ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16mf4x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16mf4x4(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsuxseg4ei16_v_bf16mf4x4(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16mf2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], 
i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16mf2x4(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsuxseg4ei16_v_bf16mf2x4(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16m1x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16m1x4(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsuxseg4ei16_v_bf16m1x4(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16m2x4( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16m2x4(__bf16 *rs1, vuint16m2_t vs2, @@ -55,7 +55,7 @@ void test_vsuxseg4ei16_v_bf16m2x4(__bf16 *rs1, vuint16m2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16mf4x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, @@ -67,7 +67,7 @@ void test_vsuxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16mf2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, @@ -79,7 +79,7 @@ void test_vsuxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16m1x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], 
[[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, @@ -90,7 +90,7 @@ void test_vsuxseg4ei16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_bf16m2x4_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg5ei16.c index 376e72e90a76d..800fef28f29a8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg5ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_bf16mf4x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_bf16mf4x5(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsuxseg5ei16_v_bf16mf4x5(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_bf16mf2x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_bf16mf2x5(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsuxseg5ei16_v_bf16mf2x5(__bf16 *rs1, vuint16mf2_t vs2, 
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_bf16m1x5( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_bf16m1x5(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsuxseg5ei16_v_bf16m1x5(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_bf16mf4x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsuxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_bf16mf2x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsuxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_bf16m1x5_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg6ei16.c index 15815128b9c84..16477ece39f54 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg6ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_bf16mf4x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_bf16mf4x6(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsuxseg6ei16_v_bf16mf4x6(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_bf16mf2x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_bf16mf2x6(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsuxseg6ei16_v_bf16mf2x6(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_bf16m1x6( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_bf16m1x6(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsuxseg6ei16_v_bf16m1x6(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_bf16mf4x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsuxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_bf16mf2x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], 
target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsuxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_bf16m1x6_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg7ei16.c index 2ba27ec46f267..40ab7c0350d76 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg7ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_bf16mf4x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_bf16mf4x7(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsuxseg7ei16_v_bf16mf4x7(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_bf16mf2x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_bf16mf2x7(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsuxseg7ei16_v_bf16mf2x7(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg7ei16_v_bf16m1x7( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_bf16m1x7(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsuxseg7ei16_v_bf16m1x7(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_bf16mf4x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsuxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_bf16mf2x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsuxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_bf16m1x7_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg8ei16.c index c29a17441c5b7..d8276b7fe40a8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg8ei16.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/vsuxseg8ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_bf16mf4x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_bf16mf4x8(__bf16 *rs1, vuint16mf4_t vs2, @@ -22,7 +22,7 @@ void test_vsuxseg8ei16_v_bf16mf4x8(__bf16 *rs1, vuint16mf4_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_bf16mf2x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_bf16mf2x8(__bf16 *rs1, vuint16mf2_t vs2, @@ -33,7 +33,7 @@ void test_vsuxseg8ei16_v_bf16mf2x8(__bf16 *rs1, vuint16mf2_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_bf16m1x8( // CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_bf16m1x8(__bf16 *rs1, vuint16m1_t vs2, @@ -44,7 +44,7 @@ void test_vsuxseg8ei16_v_bf16m1x8(__bf16 *rs1, vuint16m1_t vs2, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_bf16mf4x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, @@ -56,7 +56,7 @@ void test_vsuxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_bf16mf2x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, @@ -68,7 +68,7 @@ void test_vsuxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_bf16m1x8_m( // CHECK-RV64-SAME: [[VM:%.*]], ptr noundef [[RS1:%.*]], [[VS2:%.*]], target("riscv.vector.tuple", , 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VS3]], ptr [[RS1]], [[VS2]], [[VM]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei16.c index de6d0fbcdcaae..b51d800d6ad7a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2(const 
_Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2(const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2(const _Float16 *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2(const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2(const _Float16 *base, vuint16m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2(const float *base, vuint16m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2(const float *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2(const float *base, vuint16m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2(const float *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2(const double *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2(const double *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2(const double *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei16_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei16_v_i8m1x2(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vloxseg2ei16_v_i8m1x2(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei16_v_i8m2x2(const 
int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vloxseg2ei16_v_i8m2x2(const int8_t *base, vuint16m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei16_v_i8m4x2(const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vloxseg2ei16_v_i8m4x2(const int8_t *base, vuint16m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei16_v_i16m1x2(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vloxseg2ei16_v_i16m1x2(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei16_v_i16m2x2(const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vloxseg2ei16_v_i16m2x2(const int16_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei16_v_i16m4x2(const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vloxseg2ei16_v_i16m4x2(const int16_t *base, vuint16m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei16_v_i32m1x2(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vloxseg2ei16_v_i32m1x2(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei16_v_i32m2x2(const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vloxseg2ei16_v_i32m2x2(const int32_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei16_v_i32m4x2(const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vloxseg2ei16_v_i32m4x2(const int32_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei16_v_i64m1x2(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vloxseg2ei16_v_i64m1x2(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei16_v_i64m2x2(const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vloxseg2ei16_v_i64m2x2(const int64_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei16_v_i64m4x2(const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vloxseg2ei16_v_i64m4x2(const int64_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // 
vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2(const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2(const uint8_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2(const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2(const uint8_t *base, vuint16m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2(const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2(const uint16_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2(const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2(const uint16_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2(const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2(const uint32_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2(const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2(const uint32_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) 
[[TMP0]] // vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2(const uint64_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2(const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2(const uint64_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_m(vbool4_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_m(vbool8_t mask, const float *base, vuint16m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_m(vbool8_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m1x2_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_m(vbool16_t mask, const double *base, vuint16m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t 
test_vloxseg2ei16_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_m(vbool4_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_m(vbool8_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_m(vbool16_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_m(vbool16_t 
mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, 
i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m2x2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_m(vbool32_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei32.c index acbfa82b8bb2d..08c15b0220d53 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2(const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2(const _Float16 *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2(const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2(const _Float16 *base, vuint32m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2(const float *base, vuint32m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2(const float *base, vuint32m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2(const float *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2(const float *base, vuint32m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2(const float *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2(const double *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2(const double *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2(const double *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2(const double *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2(const int8_t *base, vuint32mf2_t bindex // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei32_v_i8m1x2(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vloxseg2ei32_v_i8m1x2(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei32_v_i8m2x2(const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vloxseg2ei32_v_i8m2x2(const int8_t *base, vuint32m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei32_v_i16m1x2(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16m1x2_t test_vloxseg2ei32_v_i16m1x2(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei32_v_i16m2x2(const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m2x2_t test_vloxseg2ei32_v_i16m2x2(const int16_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei32_v_i16m4x2(const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m4x2_t test_vloxseg2ei32_v_i16m4x2(const int16_t *base, vuint32m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei32_v_i32m1x2(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32m1x2_t test_vloxseg2ei32_v_i32m1x2(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei32_v_i32m2x2(const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m2x2_t test_vloxseg2ei32_v_i32m2x2(const int32_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei32_v_i32m4x2(const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m4x2_t test_vloxseg2ei32_v_i32m4x2(const int32_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei32_v_i64m1x2(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint64m1x2_t test_vloxseg2ei32_v_i64m1x2(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei32_v_i64m2x2(const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m2x2_t test_vloxseg2ei32_v_i64m2x2(const int64_t *base, vuint32m1_t bindex // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei32_v_i64m4x2(const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m4x2_t test_vloxseg2ei32_v_i64m4x2(const int64_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2(const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2(const uint8_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2(const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2(const uint16_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2(const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2(const uint16_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2(const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2(const uint32_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2(const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2(const 
uint32_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2(const uint64_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2(const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2(const uint64_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", 
, 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_m(vbool4_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_m(vbool8_t mask, const float *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_m(vbool8_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_m(vbool32_t mask, const double 
*base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_m(vbool16_t mask, const double *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m4x2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_m(vbool4_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint64m2x2_t 
test_vloxseg2ei32_v_i64m2x2_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_m(vbool16_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint64m2x2_t 
test_vloxseg2ei32_v_u64m2x2_m(vbool32_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei64.c index 3de2eaf48bcb3..f031fd3702962 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2(const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2(const _Float16 *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2(const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2(const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x2_t 
test_vloxseg2ei64_v_f32m1x2(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2(const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2(const float *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2(const float *base, vuint64m8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2(const float *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2(const double *base, vuint64m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2(const double *base, vuint64m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2(const double *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2(const double *base, vuint64m4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2(const double *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei64_v_i8m1x2(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8m1x2_t test_vloxseg2ei64_v_i8m1x2(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei64_v_i16m1x2(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m1x2_t test_vloxseg2ei64_v_i16m1x2(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei64_v_i16m2x2(const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16m2x2_t test_vloxseg2ei64_v_i16m2x2(const int16_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei64_v_i32m1x2(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m1x2_t test_vloxseg2ei64_v_i32m1x2(const 
int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei64_v_i32m2x2(const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint32m2x2_t test_vloxseg2ei64_v_i32m2x2(const int32_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei64_v_i32m4x2(const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32m4x2_t test_vloxseg2ei64_v_i32m4x2(const int32_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei64_v_i64m1x2(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint64m1x2_t test_vloxseg2ei64_v_i64m1x2(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei64_v_i64m2x2(const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint64m2x2_t test_vloxseg2ei64_v_i64m2x2(const int64_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei64_v_i64m4x2(const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint64m4x2_t test_vloxseg2ei64_v_i64m4x2(const int64_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2(const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2(const uint16_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2(const uint32_t *base, 
vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2(const uint32_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2(const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2(const uint32_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2(const uint64_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2(const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2(const uint64_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_m(vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_m(vbool8_t mask, const float *base, vuint64m8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_m(vbool8_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei64_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_m(vbool16_t mask, const double *base, vuint64m4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -580,7 
+580,7 @@ vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_m(vbool8_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_m(vbool16_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16m1x2_t 
test_vloxseg2ei64_v_u16m1x2_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
@@ -820,7 +820,7 @@ vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_m(vbool32_t mask, const uint64_t *base
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m4x2_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei8.c
index bb70a3d9d1b8b..0737c6f85bc1e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei8.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf4x2
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2(const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2(const _Float16 *base, vuint8mf8_t bi
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf2x2
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2(const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
@@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2(const _Float16 *base, vuint8mf4_t bi
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m1x2
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2(const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
@@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2(const _Float16 *base, vuint8mf2_t bind
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m2x2
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2(const _Float16 *base, vuint8m1_t bindex, size_t vl) {
@@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2(const _Float16 *base, vuint8m1_t binde
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m4x2
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2(const _Float16 *base, vuint8m2_t bindex, size_t vl) {
@@ -60,7 +60,7 @@ vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2(const _Float16 *base, vuint8m2_t binde
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32mf2x2
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2(const float *base, vuint8mf8_t bindex, size_t vl) {
@@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2(const float *base, vuint8mf8_t binde
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m1x2
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2(const float *base, vuint8mf4_t bindex, size_t vl) {
@@ -80,7 +80,7 @@ vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2(const float *base, vuint8mf4_t bindex,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m2x2
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2(const float *base, vuint8mf2_t bindex, size_t vl) {
@@ -90,7 +90,7 @@ vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2(const float *base, vuint8mf2_t bindex,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m4x2
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2(const float *base, vuint8m1_t bindex, size_t vl) {
@@ -100,7 +100,7 @@ vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2(const float *base, vuint8m1_t bindex,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m1x2
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2(const double *base, vuint8mf8_t bindex, size_t vl) {
@@ -110,7 +110,7 @@ vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2(const double *base, vuint8mf8_t bindex
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m2x2
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2(const double *base, vuint8mf4_t bindex, size_t vl) {
@@ -120,7 +120,7 @@ vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2(const double *base, vuint8mf4_t bindex
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m4x2
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2(const double *base, vuint8mf2_t bindex, size_t vl) {
@@ -130,7 +130,7 @@ vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2(const double *base, vuint8mf2_t bindex
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf8x2
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -140,7 +140,7 @@ vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2(const int8_t *base, vuint8mf8_t bindex,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf4x2
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei8_v_i8m1x2(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vloxseg2ei8_v_i8m1x2(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei8_v_i8m2x2(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vloxseg2ei8_v_i8m2x2(const int8_t *base, vuint8m2_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei8_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei8_v_i8m4x2(const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vloxseg2ei8_v_i8m4x2(const int8_t *base, vuint8m4_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei8_v_i16m1x2(const int16_t 
*base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vloxseg2ei8_v_i16m1x2(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei8_v_i16m2x2(const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vloxseg2ei8_v_i16m2x2(const int16_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei8_v_i16m4x2(const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vloxseg2ei8_v_i16m4x2(const int16_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei8_v_i32m1x2(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vloxseg2ei8_v_i32m1x2(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei8_v_i32m2x2(const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vloxseg2ei8_v_i32m2x2(const int32_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei8_v_i32m4x2(const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vloxseg2ei8_v_i32m4x2(const int32_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei8_v_i64m1x2(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vloxseg2ei8_v_i64m1x2(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei8_v_i64m2x2(const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vloxseg2ei8_v_i64m2x2(const int64_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei8_v_i64m4x2(const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vloxseg2ei8_v_i64m4x2(const int64_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei8_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2(const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2(const uint8_t *base, vuint8m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2(const uint8_t *base, 
vuint8m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2(const uint8_t *base, vuint8m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2(const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2(const uint16_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2(const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2(const uint16_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2(const uint32_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2(const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2(const uint32_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2(const uint64_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei8_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2(const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2(const uint64_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_m(vbool64_t 
mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_m(vbool16_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_m(vbool8_t mask, const float *base, vuint8m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_m(vbool8_t mask, const float *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_m(vbool16_t mask, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_m(vbool64_t mask, 
const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], 
i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_m(vbool16_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_m(vbool32_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_m(vbool16_t mask, const int64_t 
*base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_m(vbool64_t mask, const 
uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei16.c index bdd978827ec76..4b1cc342e9581 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) 
poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3(const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3(const _Float16 *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3(const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3(const float *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3(const double *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t 
test_vloxseg3ei16_v_i8mf8x3(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei16_v_i8m1x3(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei16_v_i8m1x3(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei16_v_i8m2x3(const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vloxseg3ei16_v_i8m2x3(const int8_t *base, vuint16m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei16_v_i16m1x3(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vloxseg3ei16_v_i16m1x3(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei16_v_i16m2x3(const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vloxseg3ei16_v_i16m2x3(const int16_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei16_v_i32m1x3(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vloxseg3ei16_v_i32m1x3(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei16_v_i32m2x3(const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vloxseg3ei16_v_i32m2x3(const int32_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei16_v_i64m1x3(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vloxseg3ei16_v_i64m1x3(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei16_v_i64m2x3(const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vloxseg3ei16_v_i64m2x3(const int64_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t 
test_vloxseg3ei16_v_u8mf4x3(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3(const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3(const uint8_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3(const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3(const uint16_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3(const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3(const uint32_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3(const uint64_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei16_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 
@@ vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t 
test_vloxseg3ei16_v_u16mf2x3_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 
6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei32.c index 103ab7ffc06fd..98fad9d89fb04 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", 
, 3) @test_vloxseg3ei32_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3(const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3(const _Float16 *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3(const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t 
test_vloxseg3ei32_v_f32m2x3(const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3(const float *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3(const double *base, vuint32m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3(const double *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei32_v_i8m1x3(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei32_v_i8m1x3(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei32_v_i8m2x3(const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vloxseg3ei32_v_i8m2x3(const int8_t *base, vuint32m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei32_v_i16m1x3(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vloxseg3ei32_v_i16m1x3(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei32_v_i16m2x3(const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vloxseg3ei32_v_i16m2x3(const int16_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei32_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei32_v_i32m1x3(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vloxseg3ei32_v_i32m1x3(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei32_v_i32m2x3(const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vloxseg3ei32_v_i32m2x3(const int32_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t 
test_vloxseg3ei32_v_i64m1x3(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vloxseg3ei32_v_i64m1x3(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei32_v_i64m2x3(const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vloxseg3ei32_v_i64m2x3(const int64_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3(const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3(const uint8_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3(const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3(const uint16_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3(const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3(const uint32_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // 
vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3(const uint64_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_m(vbool64_t 
mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_m(vbool8_t mask, 
const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei64.c index 2168d23bde06a..45b18229a883a 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3(const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3(const _Float16 *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3(const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3(const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3(const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3(const float *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3(const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3(const double *base, vuint64m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3(const double *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei64_v_i8m1x3(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei64_v_i8m1x3(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t 
test_vloxseg3ei64_v_i16mf2x3(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei64_v_i16m1x3(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x3_t test_vloxseg3ei64_v_i16m1x3(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei64_v_i16m2x3(const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x3_t test_vloxseg3ei64_v_i16m2x3(const int16_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei64_v_i32m1x3(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32m1x3_t test_vloxseg3ei64_v_i32m1x3(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei64_v_i32m2x3(const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x3_t test_vloxseg3ei64_v_i32m2x3(const int32_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei64_v_i64m1x3(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x3_t test_vloxseg3ei64_v_i64m1x3(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei64_v_i64m2x3(const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x3_t test_vloxseg3ei64_v_i64m2x3(const int64_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei64_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t 
test_vloxseg3ei64_v_u16m2x3(const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3(const uint16_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3(const uint32_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3(const uint64_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m1x3_m 
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_m(vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x3_t 
test_vloxseg3ei64_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_m(vbool8_t mask, 
const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 
5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei8.c index 28764d6336c15..1713de8b096f4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3(const _Float16 *base, vuint8mf8_t bindex, 
size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3(const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3(const _Float16 *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3(const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3(const float *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3(const double *base, vuint8mf4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3(const double *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m1x3 
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei8_v_i8m1x3(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei8_v_i8m1x3(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei8_v_i8m2x3(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vloxseg3ei8_v_i8m2x3(const int8_t *base, vuint8m2_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3(const int16_t *base, vuint8mf4_t bindex, size_t 
vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei8_v_i16m1x3(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vloxseg3ei8_v_i16m1x3(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei8_v_i16m2x3(const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vloxseg3ei8_v_i16m2x3(const int16_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) 
poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei8_v_i32m1x3(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vloxseg3ei8_v_i32m1x3(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei8_v_i32m2x3(const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vloxseg3ei8_v_i32m2x3(const int32_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei8_v_i64m1x3(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vloxseg3ei8_v_i64m1x3(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei8_v_i64m2x3(const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vloxseg3ei8_v_i64m2x3(const int64_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3(const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3(const uint8_t *base, vuint8m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3(const uint16_t *base, 
vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3(const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3(const uint16_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3(const uint32_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3(const uint64_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_m(vbool16_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_m(vbool64_t mask, const int16_t 
*base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_m(vbool16_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_m(vbool64_t mask, const 
int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_m(vbool32_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 
3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei16.c index 2b70ba4e781d8..acdba231e3f9d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // 
vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4(const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4(const _Float16 *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4(const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4(const float *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4(const double *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei16_v_i8m1x4(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei16_v_i8m1x4(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei16_v_i8m2x4(const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vloxseg4ei16_v_i8m2x4(const int8_t *base, vuint16m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // 
vint16m1x4_t test_vloxseg4ei16_v_i16m1x4(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vloxseg4ei16_v_i16m1x4(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei16_v_i16m2x4(const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vloxseg4ei16_v_i16m2x4(const int16_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei16_v_i32m1x4(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vloxseg4ei16_v_i32m1x4(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei16_v_i32m2x4(const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vloxseg4ei16_v_i32m2x4(const int32_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei16_v_i64m1x4(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vloxseg4ei16_v_i64m1x4(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei16_v_i64m2x4(const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vloxseg4ei16_v_i64m2x4(const int64_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4(const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4(const uint8_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] 
// vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4(const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4(const uint16_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4(const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4(const uint32_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4(const uint64_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_m(vbool32_t mask, const double 
*base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m1x4_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t 
test_vloxseg4ei16_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei32.c index 7395b9e72fc67..ed0fdc8d17571 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) 
poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4(const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4(const _Float16 *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4(const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4(const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4(const float *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4(const double *base, vuint32m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4(const double *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t 
test_vloxseg4ei32_v_i8mf8x4(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei32_v_i8m1x4(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei32_v_i8m1x4(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei32_v_i8m2x4(const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vloxseg4ei32_v_i8m2x4(const int8_t *base, vuint32m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei32_v_i16m1x4(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vloxseg4ei32_v_i16m1x4(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei32_v_i16m2x4(const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vloxseg4ei32_v_i16m2x4(const int16_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei32_v_i32m1x4(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vloxseg4ei32_v_i32m1x4(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei32_v_i32m2x4(const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vloxseg4ei32_v_i32m2x4(const int32_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei32_v_i64m1x4(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vloxseg4ei32_v_i64m1x4(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei32_v_i64m2x4(const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vloxseg4ei32_v_i64m2x4(const int64_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t 
test_vloxseg4ei32_v_u8mf4x4(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4(const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4(const uint8_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4(const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4(const uint16_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4(const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4(const uint32_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4(const uint64_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei32_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 
@@ vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t 
test_vloxseg4ei32_v_u16mf2x4_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]]
//
vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei64.c
index 51f073a75a7a0..fc3bdf1b4fbab 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei64.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf4x4
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]]
//
vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4(const _Float16 *base, vuint64m1_t bindex, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4(const _Float16 *base, vuint64m1_t b
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf2x4
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]]
//
vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4(const _Float16 *base, vuint64m2_t bindex, size_t vl) {
@@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4(const _Float16 *base, vuint64m2_t b
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m1x4
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]]
//
vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4(const _Float16 *base, vuint64m4_t bindex, size_t vl) {
@@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4(const _Float16 *base, vuint64m4_t bin
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", ,
4) @test_vloxseg4ei64_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4(const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4(const _Float16 *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4(const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4(const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t 
test_vloxseg4ei64_v_f32m2x4(const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4(const float *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4(const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4(const double *base, vuint64m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4(const double *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei64_v_i8m1x4(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei64_v_i8m1x4(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei64_v_i16m1x4(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x4_t test_vloxseg4ei64_v_i16m1x4(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei64_v_i16m2x4(const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x4_t test_vloxseg4ei64_v_i16m2x4(const int16_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei64_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei64_v_i32m1x4(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32m1x4_t test_vloxseg4ei64_v_i32m1x4(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei64_v_i32m2x4(const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x4_t test_vloxseg4ei64_v_i32m2x4(const int32_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei64_v_i64m1x4(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x4_t test_vloxseg4ei64_v_i64m1x4(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t 
test_vloxseg4ei64_v_i64m2x4(const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x4_t test_vloxseg4ei64_v_i64m2x4(const int64_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4(const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4(const uint16_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4(const uint32_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4(const uint64_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x4_t 
test_vloxseg4ei64_v_f32mf2x4_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_m(vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_m(vbool32_t mask, 
const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei8.c index c5a0d137c3866..88788823c84a2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4(const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4(const _Float16 *base, vuint8m1_t binde // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4(const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4(const float *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) 
[[TMP0]] // vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4(const double *base, vuint8mf4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4(const double *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei8_v_i8m1x4(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei8_v_i8m1x4(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei8_v_i8m2x4(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vloxseg4ei8_v_i8m2x4(const int8_t *base, vuint8m2_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei8_v_i16m1x4(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vloxseg4ei8_v_i16m1x4(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei8_v_i16m2x4(const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vloxseg4ei8_v_i16m2x4(const int16_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei8_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei8_v_i32m1x4(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vloxseg4ei8_v_i32m1x4(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei8_v_i32m2x4(const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vloxseg4ei8_v_i32m2x4(const int32_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei8_v_i64m1x4(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vloxseg4ei8_v_i64m1x4(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei8_v_i64m2x4(const int64_t 
*base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vloxseg4ei8_v_i64m2x4(const int64_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4(const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4(const uint8_t *base, vuint8m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4(const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4(const uint16_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei8_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4(const uint32_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4(const uint64_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_m(vbool16_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_m(vbool32_t mask, const int8_t 
*base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_m(vbool8_t mask, const int16_t *base, 
vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_m(vbool16_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_m(vbool32_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: 
ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_m(vbool16_t mask, const 
uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei16.c index b739b8b1eb1ea..7e94ffffb1983 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ 
vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei16_v_i8m1x5(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei16_v_i8m1x5(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei16_v_i16m1x5(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei16_v_i16m1x5(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei16_v_i32m1x5(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei16_v_i32m1x5(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei16_v_i64m1x5(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei16_v_i64m1x5(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t 
test_vloxseg5ei16_v_u8m1x5(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf2x5_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t 
test_vloxseg5ei16_v_i32m1x5_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei32.c index 5a526c8839b3c..a6129b9a7c378 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) 
poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei32_v_i8m1x5(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei32_v_i8m1x5(const 
int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei32_v_i16m1x5(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei32_v_i16m1x5(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei32_v_i32m1x5(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei32_v_i32m1x5(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei32_v_i64m1x5(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei32_v_i64m1x5(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5(const uint32_t *base, 
vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ 
-330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t 
test_vloxseg5ei32_v_u16m1x5_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei64.c index 49c6e04f5daa2..77c72e14f53a2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei64.c @@ -10,7 +10,7 @@ // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei64_v_i8m1x5(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei64_v_i8m1x5(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei64_v_i16m1x5(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei64_v_i16m1x5(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei64_v_i32m1x5(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei64_v_i32m1x5(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei64_v_i64m1x5(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei64_v_i64m1x5(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // 
vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", 
, 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vloxseg5ei64_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ 
vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei8.c index 044281b729f23..5b68d045161aa 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vloxseg5ei8_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5(const int8_t *base, 
vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei8_v_i8m1x5(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei8_v_i8m1x5(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei8_v_i16m1x5(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei8_v_i16m1x5(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei8_v_i32m1x5(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei8_v_i32m1x5(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei8_v_i64m1x5(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei8_v_i64m1x5(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vloxseg5ei8_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5(const 
uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, 
size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_m(vbool32_t mask, const 
uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei16.c index be6b1bbebbb93..d91105046d96c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei16_v_i8m1x6(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei16_v_i8m1x6(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei16_v_i16m1x6(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei16_v_i16m1x6(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6(const int32_t *base, vuint16mf4_t bin 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei16_v_i32m1x6(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei16_v_i32m1x6(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei16_v_i64m1x6(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei16_v_i64m1x6(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf8x6_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t 
test_vloxseg6ei16_v_i16m1x6_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei32.c index c38fe1a707475..dd96b82b754be 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6(const 
float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei32_v_i8m1x6(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei32_v_i8m1x6(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei32_v_i16m1x6(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei32_v_i16m1x6(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei32_v_i32m1x6(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei32_v_i32m1x6(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei32_v_i64m1x6(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei32_v_i64m1x6(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6(const uint8_t *base, vuint32m2_t binde 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_m(vbool64_t mask, 
const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32m1x6_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t 
test_vloxseg6ei32_v_u16mf4x6_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei64.c index 93cf75713af6b..ce9cf6ed9fa09 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei64_v_i8m1x6(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei64_v_i8m1x6(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // 
vint16m1x6_t test_vloxseg6ei64_v_i16m1x6(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei64_v_i16m1x6(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei64_v_i32m1x6(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei64_v_i32m1x6(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei64_v_i64m1x6(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei64_v_i64m1x6(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t 
test_vloxseg6ei64_v_f32m1x6_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t 
test_vloxseg6ei64_v_u16mf2x6_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei8.c index 6e331cc1b72e2..3e7fa1893cbe6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei8_v_i8m1x6(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei8_v_i8m1x6(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf4x6 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei8_v_i16m1x6(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei8_v_i16m1x6(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6(const int32_t *base, vuint8mf8_t bindex, 
size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei8_v_i32m1x6(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei8_v_i32m1x6(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei8_v_i64m1x6(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei8_v_i64m1x6(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 
6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf8x6_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_m(vbool16_t mask, const int16_t 
*base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_m(vbool8_t mask, const uint8_t 
*base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei16.c index 7db0e894c8504..16f6133a05f42 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vloxseg7ei16_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7(const 
int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei16_v_i8m1x7(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei16_v_i8m1x7(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei16_v_i16m1x7(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei16_v_i16m1x7(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei16_v_i32m1x7(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei16_v_i32m1x7(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei16_v_i64m1x7(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei16_v_i64m1x7(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
7) @test_vloxseg7ei16_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t 
test_vloxseg7ei16_v_u16m1x7(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ 
-310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_m(vbool64_t 
mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei32.c index 66b6ecc6bc900..6fb0e7c9d647c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei32_v_i8m1x7(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei32_v_i8m1x7(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // 
vint16m1x7_t test_vloxseg7ei32_v_i16m1x7(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei32_v_i16m1x7(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei32_v_i32m1x7(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei32_v_i32m1x7(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei32_v_i64m1x7(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei32_v_i64m1x7(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t 
test_vloxseg7ei32_v_f32m1x7_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t 
test_vloxseg7ei32_v_u16mf2x7_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei64.c index 9fa71b6c778a9..9f8f5b19b58ff 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei64_v_i8m1x7(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei64_v_i8m1x7(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vloxseg7ei64_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei64_v_i16m1x7(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei64_v_i16m1x7(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t 
test_vloxseg7ei64_v_i32mf2x7(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei64_v_i32m1x7(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei64_v_i32m1x7(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei64_v_i64m1x7(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei64_v_i64m1x7(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t 
test_vloxseg7ei64_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t 
test_vloxseg7ei64_v_u16m1x7_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei8.c index ff6d8ef7d6968..dec7a0dcd4c98 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) 
[[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei8_v_i8m1x7(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei8_v_i8m1x7(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei8_v_i16m1x7(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei8_v_i16m1x7(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei8_v_i32m1x7(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei8_v_i32m1x7(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vloxseg7ei8_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei8_v_i64m1x7(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei8_v_i64m1x7(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7(const uint8_t 
*base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_m(vbool64_t mask, const 
int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_m(vbool64_t mask, const uint16_t 
*base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei16.c index 17db14389e21e..47e75270b22c9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8(const int8_t *base, vuint16m1_t bindex, // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei16_v_i8m1x8(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei16_v_i8m1x8(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei16_v_i16m1x8(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei16_v_i16m1x8(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei16_v_i32m1x8(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei16_v_i32m1x8(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei16_v_i64m1x8(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei16_v_i64m1x8(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8(const uint32_t *base, vuint16mf4_t b // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t 
test_vloxseg8ei16_v_f32m1x8_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t 
test_vloxseg8ei16_v_u16mf2x8_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei32.c index 31a311cf5294e..3ae4b67064a83 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei32_v_i8m1x8(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei32_v_i8m1x8(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vloxseg8ei32_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei32_v_i16m1x8(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei32_v_i16m1x8(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t 
test_vloxseg8ei32_v_i32mf2x8(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei32_v_i32m1x8(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei32_v_i32m1x8(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei32_v_i64m1x8(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei32_v_i64m1x8(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t 
test_vloxseg8ei32_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t 
test_vloxseg8ei32_v_u16m1x8_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei64.c index ce1fd5ed1d645..bd3754b67c700 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei64.c @@ -10,7 +10,7 @@ // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei64_v_i8m1x8(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei64_v_i8m1x8(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei64_v_i16m1x8(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei64_v_i16m1x8(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei64_v_i32m1x8(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei64_v_i32m1x8(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei64_v_i64m1x8(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei64_v_i64m1x8(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // 
vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", 
, 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vloxseg8ei64_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ 
vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei8.c index baf672a878706..f8c8e1d1c6e1b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vloxseg8ei8_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8(const int8_t *base, 
vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei8_v_i8m1x8(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei8_v_i8m1x8(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei8_v_i16m1x8(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei8_v_i16m1x8(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei8_v_i32m1x8(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei8_v_i32m1x8(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei8_v_i64m1x8(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei8_v_i64m1x8(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vloxseg8ei8_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8(const 
uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, 
size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_m(vbool32_t mask, const 
uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e16ff.c index a033e06c45799..2a349939a3987 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_m(vbool16_t mask, const int16_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_m(vbool8_t mask, const int16_t *base, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_m(vbool4_t mask, const int16_t *base, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e32ff.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e32ff.c index 3ec0991f4c59f..b1bec34c098cb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_m(vbool32_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 
[[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_m(vbool16_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_m(vbool8_t mask, const float *base, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_m(vbool32_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vlseg2e32ff_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_m(vbool16_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_m(vbool8_t mask, const int32_t *base, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e64ff.c index a609c0f3e62f6..b48da19001a54 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_m(vbool64_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_m(vbool32_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_m(vbool16_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e8ff.c index b52c263d28892..5bb88304b1f76 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_m(vbool8_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_m(vbool4_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_m(vbool2_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, size // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, size // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e16ff.c index f412a3be51cfb..8acb256150250 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 
8 @@ -62,7 +62,7 @@ vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_m(vbool16_t mask, const int16_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m2x3_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_m(vbool8_t mask, const int16_t *base, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e32ff.c index a56c15ad537c9..448f621c6e954 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_m(vbool32_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_m(vbool16_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_m(vbool32_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_m(vbool16_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e64ff.c index 70ed14c9910f4..a4716a3132094 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_m(vbool64_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_m(vbool32_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e8ff.c index 7395bbd2649b8..855658609e3ae 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_m(vbool8_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_m(vbool4_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, size // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e16ff.c index 001a750df5ea5..95899e8f9cc58 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 
@@ -101,7 +101,7 @@ vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_m(vbool16_t mask, const int16_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_m(vbool8_t mask, const int16_t *base, si // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m1x4_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e32ff.c index 3b88462c6605a..3fe6d365d54a7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x4_t 
test_vlseg4e32ff_v_f32mf2x4_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_m(vbool32_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_m(vbool16_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_m(vbool32_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_m(vbool16_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e64ff.c index ac9d69b664279..6c5434f16ec58 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_m(vbool64_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_m(vbool32_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e8ff.c index 2a2aebb2500a4..510dc81647533 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_m(vbool8_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_m(vbool4_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, size // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e16ff.c index 627c7fc71ef64..2b6a048e46f2a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } 
@llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_m(vbool16_t mask, const int16_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e32ff.c index 9772c4956e99b..b386f0451fd0b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } 
@llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_m(vbool32_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_m(vbool32_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e64ff.c index 2af70f1612a21..93463e0b0039e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } 
@llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_m(vbool64_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e8ff.c index dfa5c265cf809..f24120977a163 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr 
[[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", 
, 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_m(vbool8_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e16ff.c index 08e2b267bee98..8ab64ec49e89a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_m(vbool16_t mask, const int16_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e32ff.c index 26eac4e239b36..c68013e07e8bc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ 
-36,7 +36,7 @@ vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_m(vbool32_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_m(vbool32_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32m1x6_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e64ff.c index 804482e9fcd01..ef0dd6374f782 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_m(vbool64_t mask, const int64_t 
*base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e8ff.c index f2983108ab7e6..606b27a588f28 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store 
i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_m(vbool8_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vlseg6e8ff_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e16ff.c index b93a5b71ef81a..e2e7e21ba0a5a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e16ff.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf4x7_m 
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_m(vbool16_t mask, const int16_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e32ff.c index bff80fd0c0050..f550e5f3bf87b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_m(vbool32_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } 
@llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_m(vbool32_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e64ff.c index 2bcf4fdb1ec83..74ed16b165ee3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_m(vbool64_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e8ff.c index 24d96adb44d9a..476b1a44765cc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vlseg7e8ff_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_m(vbool8_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e16ff.c index 8c4511912763b..659b7776a838c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } 
@llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_m(vbool16_t mask, const int16_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e32ff.c index 9f8106c559089..589735278d023 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } 
@llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_m(vbool32_t mask, const float *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_m(vbool32_t mask, const int32_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e64ff.c index dde642f10d2f3..9cedb2066c0f4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } 
@llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_m(vbool64_t mask, const int64_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e8ff.c index eb410763c10c5..ccdc2ebbca42b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr 
[[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_m(vbool8_t mask, const int8_t *base, size_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", 
, 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei16.c index 660394affe481..29f3bf60bf8ae 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2(const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2(const _Float16 *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2(const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2(const _Float16 *base, vuint16m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2(const float *base, vuint16mf2_t bindex, size_t vl) { @@ 
-80,7 +80,7 @@ vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2(const float *base, vuint16m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2(const float *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2(const float *base, vuint16m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2(const float *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", 
, 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2(const double *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2(const double *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2(const double *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei16_v_i8m1x2(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vluxseg2ei16_v_i8m1x2(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei16_v_i8m2x2(const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vluxseg2ei16_v_i8m2x2(const int8_t *base, vuint16m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei16_v_i8m4x2(const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vluxseg2ei16_v_i8m4x2(const int8_t *base, vuint16m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei16_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei16_v_i16m1x2(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vluxseg2ei16_v_i16m1x2(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t 
test_vluxseg2ei16_v_i16m2x2(const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vluxseg2ei16_v_i16m2x2(const int16_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei16_v_i16m4x2(const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vluxseg2ei16_v_i16m4x2(const int16_t *base, vuint16m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei16_v_i32m1x2(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vluxseg2ei16_v_i32m1x2(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei16_v_i32m2x2(const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vluxseg2ei16_v_i32m2x2(const int32_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei16_v_i32m4x2(const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vluxseg2ei16_v_i32m4x2(const int32_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei16_v_i64m1x2(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vluxseg2ei16_v_i64m1x2(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei16_v_i64m2x2(const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vluxseg2ei16_v_i64m2x2(const int64_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei16_v_i64m4x2(const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vluxseg2ei16_v_i64m4x2(const int64_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei16_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2(const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2(const uint8_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2(const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2(const uint8_t *base, vuint16m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t 
test_vluxseg2ei16_v_u16mf4x2(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2(const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2(const uint16_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2(const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2(const uint16_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2(const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2(const uint32_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2(const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2(const uint32_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2(const uint64_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2(const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2(const uint64_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_m(vbool4_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_m(vbool32_t 
mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_m(vbool8_t mask, const float *base, vuint16m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_m(vbool8_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_m(vbool16_t mask, const double *base, vuint16m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m4x2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_m(vbool4_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t 
test_vluxseg2ei16_v_i32mf2x2_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_m(vbool8_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_m(vbool16_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t 
test_vluxseg2ei16_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_m(vbool32_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 
6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei32.c index 297609ed5712f..19690a05aa363 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", 
, 2) @test_vluxseg2ei32_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2(const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2(const _Float16 *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2(const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2(const _Float16 *base, vuint32m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t 
test_vluxseg2ei32_v_f32m1x2(const float *base, vuint32m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2(const float *base, vuint32m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2(const float *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2(const float *base, vuint32m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2(const float *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2(const double *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2(const double *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2(const double *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2(const double *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei32_v_i8m1x2(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vluxseg2ei32_v_i8m1x2(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei32_v_i8m2x2(const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vluxseg2ei32_v_i8m2x2(const int8_t *base, vuint32m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei32_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei32_v_i16m1x2(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16m1x2_t test_vluxseg2ei32_v_i16m1x2(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei32_v_i16m2x2(const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m2x2_t test_vluxseg2ei32_v_i16m2x2(const int16_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t 
test_vluxseg2ei32_v_i16m4x2(const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m4x2_t test_vluxseg2ei32_v_i16m4x2(const int16_t *base, vuint32m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei32_v_i32m1x2(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32m1x2_t test_vluxseg2ei32_v_i32m1x2(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei32_v_i32m2x2(const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m2x2_t test_vluxseg2ei32_v_i32m2x2(const int32_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei32_v_i32m4x2(const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m4x2_t test_vluxseg2ei32_v_i32m4x2(const int32_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei32_v_i64m1x2(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint64m1x2_t test_vluxseg2ei32_v_i64m1x2(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei32_v_i64m2x2(const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m2x2_t test_vluxseg2ei32_v_i64m2x2(const int64_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei32_v_i64m4x2(const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m4x2_t test_vluxseg2ei32_v_i64m4x2(const int64_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei32_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2(const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2(const uint8_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t 
test_vluxseg2ei32_v_u16m1x2(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2(const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2(const uint16_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2(const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2(const uint16_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2(const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2(const uint32_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2(const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2(const uint32_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2(const uint64_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2(const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2(const uint64_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_m(vbool32_t 
mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_m(vbool4_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_m(vbool8_t mask, const float *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_m(vbool8_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_m(vbool16_t mask, const double *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf8x2_m 
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16mf2x2_t 
test_vluxseg2ei32_v_i16mf2x2_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_m(vbool4_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_m(vbool16_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint16mf2x2_t 
test_vluxseg2ei32_v_u16mf2x2_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 
5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_m(vbool32_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei64.c index e61165065d9d9..35aa11afa0388 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2(const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2(const _Float16 *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2(const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2(const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2(const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2(const float *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2(const float *base, vuint64m8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2(const float *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2(const double *base, vuint64m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2(const double *base, vuint64m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2(const double *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2(const double *base, vuint64m4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2(const double *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t 
test_vluxseg2ei64_v_i8m1x2(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8m1x2_t test_vluxseg2ei64_v_i8m1x2(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei64_v_i16m1x2(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m1x2_t test_vluxseg2ei64_v_i16m1x2(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei64_v_i16m2x2(const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16m2x2_t test_vluxseg2ei64_v_i16m2x2(const int16_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei64_v_i32m1x2(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m1x2_t test_vluxseg2ei64_v_i32m1x2(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei64_v_i32m2x2(const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint32m2x2_t test_vluxseg2ei64_v_i32m2x2(const int32_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei64_v_i32m4x2(const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32m4x2_t test_vluxseg2ei64_v_i32m4x2(const int32_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei64_v_i64m1x2(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint64m1x2_t test_vluxseg2ei64_v_i64m1x2(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei64_v_i64m2x2(const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint64m2x2_t test_vluxseg2ei64_v_i64m2x2(const int64_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei64_v_i64m4x2(const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint64m4x2_t test_vluxseg2ei64_v_i64m4x2(const int64_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei64_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t 
test_vluxseg2ei64_v_u8m1x2(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2(const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2(const uint16_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2(const uint32_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2(const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2(const uint32_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2(const uint64_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2(const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2(const uint64_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_m(vbool16_t mask, 
const float *base, vuint64m4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_m(vbool8_t mask, const float *base, vuint64m8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_m(vbool8_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_m(vbool16_t mask, const double *base, vuint64m4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m1x2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_m(vbool8_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint64m1x2_t 
test_vluxseg2ei64_v_i64m1x2_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_m(vbool16_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_m(vbool32_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) { diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei8.c index 0e8dad0824004..0536770cbef43 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2(const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2(const _Float16 *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2(const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2(const _Float16 *base, vuint8m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2(const float *base, vuint8mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2(const float *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2(const float *base, vuint8m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2(const float *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t 
test_vluxseg2ei8_v_f64m2x2(const double *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2(const double *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2(const double *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2(const double *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei8_v_i8m1x2(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vluxseg2ei8_v_i8m1x2(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei8_v_i8m2x2(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vluxseg2ei8_v_i8m2x2(const int8_t *base, vuint8m2_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei8_v_i8m4x2(const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vluxseg2ei8_v_i8m4x2(const int8_t *base, vuint8m4_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei8_v_i16m1x2(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vluxseg2ei8_v_i16m1x2(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei8_v_i16m2x2(const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vluxseg2ei8_v_i16m2x2(const int16_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei8_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei8_v_i16m4x2(const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vluxseg2ei8_v_i16m4x2(const int16_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei8_v_i32m1x2(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vluxseg2ei8_v_i32m1x2(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei8_v_i32m2x2(const int32_t 
*base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vluxseg2ei8_v_i32m2x2(const int32_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei8_v_i32m4x2(const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vluxseg2ei8_v_i32m4x2(const int32_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei8_v_i64m1x2(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vluxseg2ei8_v_i64m1x2(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei8_v_i64m2x2(const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vluxseg2ei8_v_i64m2x2(const int64_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei8_v_i64m4x2(const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vluxseg2ei8_v_i64m4x2(const int64_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2(const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2(const uint8_t *base, vuint8m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2(const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2(const uint8_t *base, vuint8m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei8_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2(const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2(const uint16_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2(const 
uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2(const uint16_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2(const uint32_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2(const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2(const uint32_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2(const uint64_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2(const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2(const uint64_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m2x2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_m(vbool16_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_m(vbool8_t mask, const float *base, vuint8m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_m(vbool8_t mask, const float *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_m(vbool16_t mask, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_m(vbool16_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_m(vbool32_t mask, const int8_t 
*base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_m(vbool16_t mask, const int16_t *base, 
vuint8mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_m(vbool16_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_m(vbool32_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_m(vbool16_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_m(vbool32_t mask, 
const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t 
test_vluxseg2ei8_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei16.c index 7beec8b207da1..07300daf7b193 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei16.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3(const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3(const _Float16 *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3(const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3(const float *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3(const double *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei16_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei16_v_i8m1x3(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei16_v_i8m1x3(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei16_v_i8m2x3(const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vluxseg3ei16_v_i8m2x3(const int8_t *base, vuint16m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3(const 
int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei16_v_i16m1x3(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vluxseg3ei16_v_i16m1x3(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei16_v_i16m2x3(const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vluxseg3ei16_v_i16m2x3(const int16_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei16_v_i32m1x3(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vluxseg3ei16_v_i32m1x3(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei16_v_i32m2x3(const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vluxseg3ei16_v_i32m2x3(const int32_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei16_v_i64m1x3(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vluxseg3ei16_v_i64m1x3(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei16_v_i64m2x3(const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vluxseg3ei16_v_i64m2x3(const int64_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
3) @test_vluxseg3ei16_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3(const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3(const uint8_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t 
test_vluxseg3ei16_v_u16mf2x3(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3(const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3(const uint16_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3(const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3(const uint32_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3(const uint64_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m1x3_m 
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t 
test_vluxseg3ei16_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t 
test_vluxseg3ei16_v_u16m1x3_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei32.c index 89e690a20be3f..6cd1df5ce3eaf 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3(const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3(const _Float16 *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
3) @test_vluxseg3ei32_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3(const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3(const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3(const float *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t 
test_vluxseg3ei32_v_f64m1x3(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3(const double *base, vuint32m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3(const double *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei32_v_i8m1x3(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei32_v_i8m1x3(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei32_v_i8m2x3(const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vluxseg3ei32_v_i8m2x3(const int8_t *base, vuint32m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei32_v_i16m1x3(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vluxseg3ei32_v_i16m1x3(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei32_v_i16m2x3(const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vluxseg3ei32_v_i16m2x3(const int16_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei32_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei32_v_i32m1x3(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vluxseg3ei32_v_i32m1x3(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei32_v_i32m2x3(const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vluxseg3ei32_v_i32m2x3(const int32_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei32_v_i64m1x3(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vluxseg3ei32_v_i64m1x3(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t 
test_vluxseg3ei32_v_i64m2x3(const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vluxseg3ei32_v_i64m2x3(const int64_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3(const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3(const uint8_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3(const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3(const uint16_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3(const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3(const uint32_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3(const uint64_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], 
i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32mf2x3_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t 
test_vluxseg3ei32_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_m(vbool4_t mask, 
const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 
5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei64.c index 073a83536d33e..1118ad4c09eae 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3(const _Float16 *base, vuint64m1_t 
bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3(const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3(const _Float16 *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3(const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3(const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3(const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3(const float *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3(const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3(const double *base, vuint64m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3(const double *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei64_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei64_v_i8m1x3(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei64_v_i8m1x3(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t 
test_vluxseg3ei64_v_i16m1x3(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x3_t test_vluxseg3ei64_v_i16m1x3(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei64_v_i16m2x3(const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x3_t test_vluxseg3ei64_v_i16m2x3(const int16_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei64_v_i32m1x3(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32m1x3_t test_vluxseg3ei64_v_i32m1x3(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei64_v_i32m2x3(const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x3_t test_vluxseg3ei64_v_i32m2x3(const int32_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei64_v_i64m1x3(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x3_t test_vluxseg3ei64_v_i64m1x3(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei64_v_i64m2x3(const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x3_t test_vluxseg3ei64_v_i64m2x3(const int64_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei64_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3(const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3(const uint16_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t 
test_vluxseg3ei64_v_u32mf2x3(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3(const uint32_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3(const uint64_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_m(vbool16_t mask, const _Float16 *bas // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_m(vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_m(vbool64_t 
mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m1x3_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x3_t 
test_vluxseg3ei64_v_u16mf4x3_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei8.c index fe7d9c403a50f..e28596034875f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3(const _Float16 *base, 
vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3(const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3(const _Float16 *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3(const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3(const float *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3(const double *base, vuint8mf4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3(const double *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei8_v_i8m1x3(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei8_v_i8m1x3(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei8_v_i8m2x3(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vluxseg3ei8_v_i8m2x3(const int8_t *base, vuint8m2_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei8_v_i16m1x3(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vluxseg3ei8_v_i16m1x3(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei8_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei8_v_i16m2x3(const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vluxseg3ei8_v_i16m2x3(const int16_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei8_v_i32m1x3(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vluxseg3ei8_v_i32m1x3(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei8_v_i32m2x3(const int32_t 
*base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vluxseg3ei8_v_i32m2x3(const int32_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei8_v_i64m1x3(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vluxseg3ei8_v_i64m1x3(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei8_v_i64m2x3(const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vluxseg3ei8_v_i64m2x3(const int64_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3(const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3(const uint8_t *base, vuint8m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3(const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3(const uint16_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei8_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3(const uint32_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3(const 
uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3(const uint64_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_m(vbool16_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_m(vbool32_t mask, const int16_t 
*base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_m(vbool16_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_m(vbool32_t mask, const 
int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_m(vbool32_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], 
i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei16.c index 71a1f9b158011..1ab0adef8484e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4(const _Float16 *base, vuint16m1_t bindex, 
size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4(const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4(const _Float16 *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4(const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4(const float *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4(const double *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei16_v_i8m1x4(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei16_v_i8m1x4(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei16_v_i8m2x4(const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vluxseg4ei16_v_i8m2x4(const int8_t *base, vuint16m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei16_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei16_v_i16m1x4(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vluxseg4ei16_v_i16m1x4(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t 
test_vluxseg4ei16_v_i16m2x4(const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vluxseg4ei16_v_i16m2x4(const int16_t *base, vuint16m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei16_v_i32m1x4(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vluxseg4ei16_v_i32m1x4(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei16_v_i32m2x4(const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vluxseg4ei16_v_i32m2x4(const int32_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei16_v_i64m1x4(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vluxseg4ei16_v_i64m1x4(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei16_v_i64m2x4(const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vluxseg4ei16_v_i64m2x4(const int64_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4(const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4(const uint8_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
4) @test_vluxseg4ei16_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4(const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4(const uint16_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t 
test_vluxseg4ei16_v_u32mf2x4(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4(const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4(const uint32_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4(const uint64_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_m(vbool16_t mask, const _Float16 *bas // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t 
test_vluxseg4ei16_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei16_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ 
-650,7 +650,7 @@ vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei32.c index 8b54262def489..d5f59061e9b68 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4(const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4(const _Float16 *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4(const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4(const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4(const float *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4(const double *base, vuint32m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4(const double *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei32_v_i8m1x4(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei32_v_i8m1x4(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei32_v_i8m2x4(const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vluxseg4ei32_v_i8m2x4(const int8_t *base, vuint32m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t 
test_vluxseg4ei32_v_i16mf4x4(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei32_v_i16m1x4(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vluxseg4ei32_v_i16m1x4(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei32_v_i16m2x4(const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vluxseg4ei32_v_i16m2x4(const int16_t *base, vuint32m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei32_v_i32m1x4(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vluxseg4ei32_v_i32m1x4(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei32_v_i32m2x4(const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vluxseg4ei32_v_i32m2x4(const int32_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei32_v_i64m1x4(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vluxseg4ei32_v_i64m1x4(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei32_v_i64m2x4(const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vluxseg4ei32_v_i64m2x4(const int64_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei32_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4(const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4(const uint8_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t 
test_vluxseg4ei32_v_u16mf2x4(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4(const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4(const uint16_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4(const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4(const uint32_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4(const uint64_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m1x4_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t 
test_vluxseg4ei32_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t 
test_vluxseg4ei32_v_u16m1x4_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei64.c index cfd67e337082e..77cd350af20f1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", 
, 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4(const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4(const _Float16 *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4(const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4(const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4(const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4(const float *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4(const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ 
vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4(const double *base, vuint64m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4(const double *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei64_v_i8m1x4(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei64_v_i8m1x4(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei64_v_i16m1x4(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x4_t test_vluxseg4ei64_v_i16m1x4(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei64_v_i16m2x4(const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x4_t test_vluxseg4ei64_v_i16m2x4(const int16_t *base, vuint64m8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei64_v_i32m1x4(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32m1x4_t test_vluxseg4ei64_v_i32m1x4(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei64_v_i32m2x4(const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x4_t test_vluxseg4ei64_v_i32m2x4(const int32_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei64_v_i64m1x4(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x4_t test_vluxseg4ei64_v_i64m1x4(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei64_v_i64m2x4(const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x4_t test_vluxseg4ei64_v_i64m2x4(const int64_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x4_t 
test_vluxseg4ei64_v_u8mf8x4(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4(const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4(const uint16_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4(const uint32_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4(const uint64_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_m(vbool8_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei64_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_m(vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_m(vbool16_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 
@@ vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_m(vbool8_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_m(vbool16_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_m(vbool32_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x4_t 
test_vluxseg4ei64_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_m(vbool16_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_m(vbool64_t mask, const uint64_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei8.c index 6f3db167a7729..dd7381a4ffd0d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4(const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4(const _Float16 *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4(const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4(const float *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4(const double *base, vuint8mf4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4(const double *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t 
test_vluxseg4ei8_v_i8m1x4(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei8_v_i8m1x4(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei8_v_i8m2x4(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vluxseg4ei8_v_i8m2x4(const int8_t *base, vuint8m2_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei8_v_i16m1x4(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vluxseg4ei8_v_i16m1x4(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei8_v_i16m2x4(const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vluxseg4ei8_v_i16m2x4(const int16_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei8_v_i32m1x4(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vluxseg4ei8_v_i32m1x4(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei8_v_i32m2x4(const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vluxseg4ei8_v_i32m2x4(const int32_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei8_v_i64m1x4(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vluxseg4ei8_v_i64m1x4(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei8_v_i64m2x4(const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vluxseg4ei8_v_i64m2x4(const int64_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei8_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4(const uint8_t *base, 
vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4(const uint8_t *base, vuint8m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4(const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4(const uint16_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4(const uint32_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4(const uint64_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t 
vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_m(vbool16_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: 
ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_m(vbool32_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_m(vbool64_t mask, const 
int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_m(vbool16_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_m(vbool32_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_m(vbool16_t mask, const 
uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei16.c index 3361b70ab5055..508d900a36640 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) 
poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei16_v_i8m1x5(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei16_v_i8m1x5(const 
int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei16_v_i16m1x5(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei16_v_i16m1x5(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei16_v_i32m1x5(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei16_v_i32m1x5(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei16_v_i64m1x5(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei16_v_i64m1x5(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5(const uint32_t *base, 
vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { 
@@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t 
test_vluxseg5ei16_v_u16m1x5_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei32.c index 6b7dad071c151..cd8bbc980cc8e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei32.c @@ -10,7 +10,7 @@ // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei32_v_i8m1x5(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei32_v_i8m1x5(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei32_v_i16m1x5(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei32_v_i16m1x5(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei32_v_i32m1x5(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei32_v_i32m1x5(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei32_v_i64m1x5(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei32_v_i64m1x5(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) 
[[TMP0]] // vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_m(vbool64_t 
mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t 
test_vluxseg5ei32_v_u32mf2x5_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei64.c index 26fef0384cd7a..709eb1bd9c01e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf2x5 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5(const float *base, vuint64m2_t 
bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei64_v_i8m1x5(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei64_v_i8m1x5(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei64_v_i16m1x5(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei64_v_i16m1x5(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei64_v_i32m1x5(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei64_v_i32m1x5(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei64_v_i64m1x5(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei64_v_i64m1x5(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vluxseg5ei64_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t 
test_vluxseg5ei64_v_u8m1x5(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_m(vbool32_t mask, 
const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", 
, 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t 
test_vluxseg5ei64_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", 
, 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vluxseg5ei64_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei8.c index 6193e9f164eaa..177eb292a2d15 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vluxseg5ei8_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei8_v_i8m1x5(const int8_t *base, vuint8m1_t 
bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei8_v_i8m1x5(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei8_v_i16m1x5(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei8_v_i16m1x5(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei8_v_i32m1x5(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei8_v_i32m1x5(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei8_v_i64m1x5(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei8_v_i64m1x5(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vluxseg5ei8_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5(const 
uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_m(vbool64_t mask, const int64_t 
*base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_m(vbool16_t mask, const 
uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei16.c index edbbae46d6863..3ef4d1861bc57 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei16.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei16_v_i8m1x6(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei16_v_i8m1x6(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vluxseg6ei16_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei16_v_i16m1x6(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei16_v_i16m1x6(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t 
test_vluxseg6ei16_v_i32m1x6(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei16_v_i32m1x6(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei16_v_i64m1x6(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei16_v_i64m1x6(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_m(vbool64_t mask, const 
int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf4x6_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ 
vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei32.c index a3461cab1abe9..ada8dd83c3b4a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vluxseg6ei32_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t 
test_vluxseg6ei32_v_f32m1x6(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei32_v_i8m1x6(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei32_v_i8m1x6(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei32_v_i16m1x6(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei32_v_i16m1x6(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei32_v_i32m1x6(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei32_v_i32m1x6(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei32_v_i64m1x6(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei32_v_i64m1x6(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vluxseg6ei32_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6(const uint8_t *base, vuint32m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t 
test_vluxseg6ei32_v_u8m1x6(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_m(vbool32_t mask, 
const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t 
test_vluxseg6ei32_v_u32m1x6_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei64.c index ded2b2562dc05..818869781f963 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6(const 
double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: 
ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei64_v_i8m1x6(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei64_v_i8m1x6(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei64_v_i16m1x6(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei64_v_i16m1x6(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei64_v_i32m1x6(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei64_v_i32m1x6(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei64_v_i64m1x6(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei64_v_i64m1x6(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6(const uint16_t *base, vuint64m1_t bi // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8m1x6_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t 
test_vluxseg6ei64_v_i64m1x6_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei8.c index 99f137c6b0c73..03d28a71fefbf 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei8_v_i8m1x6(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei8_v_i8m1x6(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
6) @test_vluxseg6ei8_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei8_v_i16m1x6(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei8_v_i16m1x6(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei8_v_i32m1x6(const 
int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei8_v_i32m1x6(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei8_v_i64m1x6(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei8_v_i64m1x6(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vluxseg6ei8_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ 
vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_m(vbool64_t 
mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei16.c index 5190eb108b786..a438a16182d9f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ 
vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei16_v_i8m1x7(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei16_v_i8m1x7(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei16_v_i16m1x7(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei16_v_i16m1x7(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7(const int32_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei16_v_i32m1x7(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei16_v_i32m1x7(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei16_v_i64m1x7(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei16_v_i64m1x7(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t 
test_vluxseg7ei16_v_u8m1x7(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf2x7_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t 
test_vluxseg7ei16_v_i32m1x7_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei32.c index c475d6345f010..9e43c1ac1f6e4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) 
poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7(const float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei32_v_i8m1x7(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei32_v_i8m1x7(const 
int8_t *base, vuint32m4_t bindex, s
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf4x7
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
//
vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7(const int16_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7(const int16_t *base, vuint32mf2_t bin
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf2x7
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
//
vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7(const int16_t *base, vuint32m1_t bindex, size_t vl) {
@@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7(const int16_t *base, vuint32m1_t bind
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16m1x7
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
//
vint16m1x7_t test_vluxseg7ei32_v_i16m1x7(const int16_t *base, vuint32m2_t bindex, size_t vl) {
@@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei32_v_i16m1x7(const int16_t *base, vuint32m2_t bindex
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32mf2x7
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
//
vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7(const int32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7(const int32_t *base, vuint32mf2_t bin
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32m1x7
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
//
vint32m1x7_t test_vluxseg7ei32_v_i32m1x7(const int32_t *base, vuint32m1_t bindex, size_t vl) {
@@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei32_v_i32m1x7(const int32_t *base, vuint32m1_t bindex
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i64m1x7
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
//
vint64m1x7_t test_vluxseg7ei32_v_i64m1x7(const int64_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei32_v_i64m1x7(const int64_t *base, vuint32mf2_t binde
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf8x7
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
//
vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7(const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7(const uint8_t *base, vuint32mf2_t bind
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf4x7
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
//
vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7(const uint8_t *base, vuint32m1_t bindex, size_t vl) {
@@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7(const uint8_t *base, vuint32m1_t binde
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf2x7
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
//
vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7(const uint8_t *base, vuint32m2_t bindex, size_t vl) {
@@ -200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7(const uint8_t *base, vuint32m2_t binde
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8m1x7
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
//
vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7(const uint8_t *base, vuint32m4_t bindex, size_t vl) {
@@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7(const uint8_t *base, vuint32m4_t bindex,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf4x7
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
//
vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7(const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7(const uint16_t *base, vuint32mf2_t b
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf2x7
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7(const uint32_t *base, 
vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ 
-330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t 
test_vluxseg7ei32_v_u16m1x7_m(vbool16_t mask, const uint16_t *base
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32mf2x7_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
//
vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -510,7 +510,7 @@ vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_m(vbool64_t mask, const uint32_t *ba
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32m1x7_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
//
vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
@@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_m(vbool32_t mask, const uint32_t *base
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u64m1x7_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
//
vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei64.c
index baa995b7b1de6..edf76fa836f7f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei64.c
@@ -10,7 +10,7 @@ //
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei64_v_i8m1x7(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei64_v_i8m1x7(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei64_v_i16m1x7(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei64_v_i16m1x7(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei64_v_i32m1x7(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei64_v_i32m1x7(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei64_v_i64m1x7(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei64_v_i64m1x7(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // 
vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", 
, 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vluxseg7ei64_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ 
vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei8.c index 14acaf8c5d5f0..bf354add31165 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vluxseg7ei8_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7(const int8_t *base, 
vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei8_v_i8m1x7(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei8_v_i8m1x7(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei8_v_i16m1x7(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei8_v_i16m1x7(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei8_v_i32m1x7(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei8_v_i32m1x7(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei8_v_i64m1x7(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei8_v_i64m1x7(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vluxseg7ei8_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7(const 
uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, 
size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_m(vbool16_t mask, const int16_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_m(vbool32_t mask, const 
uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) 
poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei16.c index 418a5dffca665..1c8dd615e0240 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8(const _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8(const _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8(const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8(const _Float16 *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8(const float *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8(const float *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8(const double *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8(const int8_t *base, vuint16mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8(const int8_t *base, vuint16mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8(const int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei16_v_i8m1x8(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei16_v_i8m1x8(const int8_t *base, vuint16m2_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8(const int16_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8(const int16_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei16_v_i16m1x8(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei16_v_i16m1x8(const int16_t *base, vuint16m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8(const int32_t *base, vuint16mf4_t bin 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei16_v_i32m1x8(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei16_v_i32m1x8(const int32_t *base, vuint16mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei16_v_i64m1x8(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei16_v_i64m1x8(const int64_t *base, vuint16mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8(const uint8_t *base, vuint16mf4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8(const uint8_t *base, vuint16mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8(const uint8_t *base, vuint16m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8(const uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8(const uint16_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8(const uint16_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8(const uint16_t *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8(const uint32_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8(const uint32_t *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8(const uint64_t *base, vuint16mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf8x8_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t 
test_vluxseg8ei16_v_i16m1x8_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei32.c index 9122a0cfb4ece..cef6b7bf7ddec 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8(const _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8(const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8(const _Float16 *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8(const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8(const _Float16 *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8(const 
float *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8(const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8(const float *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8(const double *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8(const int8_t *base, vuint32mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8(const int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8(const int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei32_v_i8m1x8(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei32_v_i8m1x8(const int8_t *base, vuint32m4_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8(const int16_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8(const int16_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei32_v_i16m1x8(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei32_v_i16m1x8(const int16_t *base, vuint32m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8(const int32_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei32_v_i32m1x8(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei32_v_i32m1x8(const int32_t *base, vuint32m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei32_v_i64m1x8(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei32_v_i64m1x8(const int64_t *base, vuint32mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8(const uint8_t *base, vuint32mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8(const uint8_t *base, vuint32m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8(const uint8_t *base, vuint32m2_t binde 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8(const uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8(const uint16_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8(const uint16_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8(const uint16_t *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8(const uint32_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8(const uint32_t *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8(const uint64_t *base, vuint32mf2_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_m(vbool64_t mask, 
const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32m1x8_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t 
test_vluxseg8ei32_v_u16mf4x8_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei64.c index de96b1d64c652..0625d88ad1bdc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8(const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8(const _Float16 *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8(const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8(const _Float16 *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8(const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8(const _Float16 *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8(const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8(const float *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8(const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8(const float *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8(const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8(const double *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8(const int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8(const int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8(const int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei64_v_i8m1x8(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei64_v_i8m1x8(const int8_t *base, vuint64m8_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8(const int16_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8(const int16_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // 
vint16m1x8_t test_vluxseg8ei64_v_i16m1x8(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei64_v_i16m1x8(const int16_t *base, vuint64m4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8(const int32_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei64_v_i32m1x8(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei64_v_i32m1x8(const int32_t *base, vuint64m2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei64_v_i64m1x8(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei64_v_i64m1x8(const int64_t *base, vuint64m1_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8(const uint8_t *base, vuint64m1_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8(const uint8_t *base, vuint64m2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8(const uint8_t *base, vuint64m4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8(const uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8(const uint16_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8(const uint16_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8(const uint16_t *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8(const uint32_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8(const uint32_t *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8(const uint64_t *base, vuint64m1_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_m(vbool64_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_m(vbool32_t mask, const _Float16 *b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_m(vbool16_t mask, const _Float16 *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_m(vbool64_t mask, const float *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t 
test_vluxseg8ei64_v_f32m1x8_m(vbool32_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_m(vbool64_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_m(vbool32_t mask, const int16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_m(vbool16_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_m(vbool64_t mask, const int32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_m(vbool32_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_m(vbool64_t mask, const int64_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_m(vbool64_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t 
test_vluxseg8ei64_v_u16mf2x8_m(vbool32_t mask, const uint16_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_m(vbool16_t mask, const uint16_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_m(vbool64_t mask, const uint32_t *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_m(vbool32_t mask, const uint32_t *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei8.c index 713697603e039..a00686ea71cab 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8(const _Float16 *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8(const _Float16 *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8(const _Float16 *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8(const float *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8(const float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8(const double *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8(const int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8(const int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8(const int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei8_v_i8m1x8(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei8_v_i8m1x8(const int8_t *base, vuint8m1_t bindex, siz // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf4x8 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8(const int16_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8(const int16_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei8_v_i16m1x8(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei8_v_i16m1x8(const int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8(const int32_t *base, vuint8mf8_t bindex, 
size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8(const int32_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei8_v_i32m1x8(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei8_v_i32m1x8(const int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei8_v_i64m1x8(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei8_v_i64m1x8(const int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8(const uint8_t *base, vuint8mf8_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 
8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8(const uint8_t *base, vuint8mf4_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8(const uint8_t *base, vuint8mf2_t bindex // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8(const uint8_t *base, vuint8m1_t bindex, s // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8(const uint16_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8(const uint16_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8(const uint16_t *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8(const uint32_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8(const uint32_t *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8(const uint64_t *base, vuint8mf8_t binde // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_m(vbool64_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_m(vbool32_t mask, const _Float16 *ba // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_m(vbool16_t mask, const _Float16 *base // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_m(vbool64_t mask, const float *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_m(vbool32_t mask, const float *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_m(vbool64_t mask, const double *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf8x8_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_m(vbool16_t mask, const int16_t 
*base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_m(vbool32_t mask, const int32_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_m(vbool64_t mask, const int64_t *base, v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_m(vbool8_t mask, const uint8_t 
*base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_m(vbool64_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_m(vbool32_t mask, const uint16_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_m(vbool64_t mask, const uint32_t *bas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) poison, ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei16.c index 115ad00463fe5..5ca25c2e5bd16 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 
2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16mf4x2(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg2ei16_v_f16mf4x2(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16mf2x2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg2ei16_v_f16mf2x2(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16m1x2(_Float16 *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg2ei16_v_f16m1x2(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16m2x2(_Float16 *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg2ei16_v_f16m2x2(_Float16 *base, vuint16m2_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16m4x2(_Float16 *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg2ei16_v_f16m4x2(_Float16 *base, vuint16m4_t bindex, vfloat16m4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32mf2x2(float *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg2ei16_v_f32mf2x2(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m1x2(float *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg2ei16_v_f32m1x2(float *base, vuint16mf2_t bindex, vfloat32m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m2x2(float *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg2ei16_v_f32m2x2(float *base, vuint16m1_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m4x2(float *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg2ei16_v_f32m4x2(float *base, vuint16m2_t bindex, vfloat32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m1x2(double *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg2ei16_v_f64m1x2(double *base, vuint16mf4_t bindex, vfloat64m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m2x2(double *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg2ei16_v_f64m2x2(double *base, vuint16mf2_t bindex, vfloat64m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m4x2(double *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg2ei16_v_f64m4x2(double *base, vuint16m1_t bindex, vfloat64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8mf8x2(int8_t 
*base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg2ei16_v_i8mf8x2(int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8mf4x2(int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg2ei16_v_i8mf4x2(int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8mf2x2(int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg2ei16_v_i8mf2x2(int8_t *base, vuint16m1_t bindex, vint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m1x2(int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg2ei16_v_i8m1x2(int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m2x2(int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg2ei16_v_i8m2x2(int8_t *base, 
vuint16m4_t bindex, vint8m2x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m4x2(int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg2ei16_v_i8m4x2(int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16mf4x2(int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg2ei16_v_i16mf4x2(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16mf2x2(int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg2ei16_v_i16mf2x2(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m1x2(int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg2ei16_v_i16m1x2(int16_t *base, vuint16m1_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m2x2 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m2x2(int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg2ei16_v_i16m2x2(int16_t *base, vuint16m2_t bindex, vint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m4x2(int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg2ei16_v_i16m4x2(int16_t *base, vuint16m4_t bindex, vint16m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32mf2x2(int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg2ei16_v_i32mf2x2(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m1x2(int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg2ei16_v_i32m1x2(int32_t *base, vuint16mf2_t bindex, vint32m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m2x2(int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg2ei16_v_i32m2x2(int32_t *base, vuint16m1_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m4x2(int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg2ei16_v_i32m4x2(int32_t *base, vuint16m2_t bindex, vint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i64m1x2(int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg2ei16_v_i64m1x2(int64_t *base, vuint16mf4_t bindex, vint64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i64m2x2(int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg2ei16_v_i64m2x2(int64_t *base, vuint16mf2_t bindex, vint64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i64m4x2(int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg2ei16_v_i64m4x2(int64_t *base, vuint16m1_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf8x2(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg2ei16_v_u8mf8x2(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf4x2(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg2ei16_v_u8mf4x2(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf2x2(uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg2ei16_v_u8mf2x2(uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8m1x2(uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg2ei16_v_u8m1x2(uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8m2x2(uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg2ei16_v_u8m2x2(uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8m4x2(uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg2ei16_v_u8m4x2(uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16mf4x2(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg2ei16_v_u16mf4x2(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16mf2x2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg2ei16_v_u16mf2x2(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16m1x2(uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg2ei16_v_u16m1x2(uint16_t *base, vuint16m1_t bindex, vuint16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16m2x2(uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg2ei16_v_u16m2x2(uint16_t *base, vuint16m2_t bindex, vuint16m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16m4x2(uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg2ei16_v_u16m4x2(uint16_t *base, vuint16m4_t bindex, vuint16m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32mf2x2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg2ei16_v_u32mf2x2(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32m1x2(uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg2ei16_v_u32m1x2(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32m2x2(uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg2ei16_v_u32m2x2(uint32_t *base, vuint16m1_t bindex, vuint32m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32m4x2(uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg2ei16_v_u32m4x2(uint32_t *base, vuint16m2_t bindex, vuint32m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg2ei16_v_u64m1x2(uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg2ei16_v_u64m1x2(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u64m2x2(uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg2ei16_v_u64m2x2(uint64_t *base, vuint16mf2_t bindex, vuint64m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u64m4x2(uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg2ei16_v_u64m4x2(uint64_t *base, vuint16m1_t bindex, vuint64m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg2ei16_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) 
// CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg2ei16_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg2ei16_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg2ei16_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg2ei16_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint16m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], 
i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32mf2x2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg2ei16_v_f32mf2x2_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m1x2_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg2ei16_v_f32m1x2_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m2x2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg2ei16_v_f32m2x2_m(vbool16_t mask, float *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m4x2_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg2ei16_v_f32m4x2_m(vbool8_t mask, float *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m1x2_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg2ei16_v_f64m1x2_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m2x2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg2ei16_v_f64m2x2_m(vbool32_t mask, double *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m4x2_m(vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg2ei16_v_f64m4x2_m(vbool16_t mask, double *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg2ei16_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg2ei16_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg2ei16_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg2ei16_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg2ei16_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m2x2_m(vbool4_t mask, int8_t *base, 
vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg2ei16_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint16m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg2ei16_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint16m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg2ei16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg2ei16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg2ei16_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg2ei16_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg2ei16_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint16m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg2ei16_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg2ei16_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsoxseg2ei16_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsoxseg2ei16_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsoxseg2ei16_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m2x2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsoxseg2ei16_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsoxseg2ei16_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsoxseg2ei16_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t 
vl) { @@ -810,7 +810,7 @@ void test_vsoxseg2ei16_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsoxseg2ei16_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -830,7 +830,7 @@ void test_vsoxseg2ei16_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -840,7 +840,7 @@ void test_vsoxseg2ei16_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -850,7 +850,7 @@ void test_vsoxseg2ei16_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint16m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -860,7 +860,7 @@ void test_vsoxseg2ei16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -870,7 +870,7 @@ void test_vsoxseg2ei16_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -880,7 +880,7 @@ void test_vsoxseg2ei16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -890,7 +890,7 @@ void test_vsoxseg2ei16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -900,7 +900,7 @@ void test_vsoxseg2ei16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint16m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -910,7 +910,7 @@ void test_vsoxseg2ei16_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -920,7 +920,7 @@ void test_vsoxseg2ei16_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -930,7 +930,7 @@ void test_vsoxseg2ei16_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -940,7 +940,7 @@ void test_vsoxseg2ei16_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -950,7 +950,7 @@ void test_vsoxseg2ei16_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -960,7 +960,7 @@ void test_vsoxseg2ei16_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t 
// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei32.c index ad3000ce5851f..c1493f0ebac5c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16mf4x2(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg2ei32_v_f16mf4x2(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16mf2x2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg2ei32_v_f16mf2x2(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16m1x2(_Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg2ei32_v_f16m1x2(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16m2x2(_Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg2ei32_v_f16m2x2(_Float16 *base, vuint32m4_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16m4x2(_Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg2ei32_v_f16m4x2(_Float16 *base, vuint32m8_t bindex, vfloat16m4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32mf2x2(float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg2ei32_v_f32mf2x2(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m1x2(float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg2ei32_v_f32m1x2(float *base, vuint32m1_t bindex, vfloat32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m2x2(float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg2ei32_v_f32m2x2(float *base, vuint32m2_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m4x2(float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg2ei32_v_f32m4x2(float *base, vuint32m4_t bindex, vfloat32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f64m1x2(double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg2ei32_v_f64m1x2(double *base, vuint32mf2_t bindex, vfloat64m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f64m2x2(double 
*base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg2ei32_v_f64m2x2(double *base, vuint32m1_t bindex, vfloat64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f64m4x2(double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg2ei32_v_f64m4x2(double *base, vuint32m2_t bindex, vfloat64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf8x2(int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg2ei32_v_i8mf8x2(int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf4x2(int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg2ei32_v_i8mf4x2(int8_t *base, vuint32m1_t bindex, vint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf2x2(int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void 
test_vsoxseg2ei32_v_i8mf2x2(int8_t *base, vuint32m2_t bindex, vint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8m1x2(int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg2ei32_v_i8m1x2(int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8m2x2(int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg2ei32_v_i8m2x2(int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16mf4x2(int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg2ei32_v_i16mf4x2(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16mf2x2(int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg2ei32_v_i16mf2x2(int16_t *base, vuint32m1_t bindex, vint16mf2x2 // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg2ei32_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m1x2(int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg2ei32_v_i16m1x2(int16_t *base, vuint32m2_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m2x2(int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg2ei32_v_i16m2x2(int16_t *base, vuint32m4_t bindex, vint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m4x2(int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg2ei32_v_i16m4x2(int16_t *base, vuint32m8_t bindex, vint16m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32mf2x2(int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg2ei32_v_i32mf2x2(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32m1x2(int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg2ei32_v_i32m1x2(int32_t *base, vuint32m1_t bindex, vint32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32m2x2(int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg2ei32_v_i32m2x2(int32_t *base, vuint32m2_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32m4x2(int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg2ei32_v_i32m4x2(int32_t *base, vuint32m4_t bindex, vint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m1x2(int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg2ei32_v_i64m1x2(int64_t *base, vuint32mf2_t bindex, vint64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m2x2(int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg2ei32_v_i64m2x2(int64_t *base, vuint32m1_t bindex, vint64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m4x2(int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg2ei32_v_i64m4x2(int64_t *base, vuint32m2_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf8x2(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg2ei32_v_u8mf8x2(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf4x2(uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg2ei32_v_u8mf4x2(uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf2x2(uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg2ei32_v_u8mf2x2(uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8m1x2(uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg2ei32_v_u8m1x2(uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8m2x2(uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg2ei32_v_u8m2x2(uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16mf4x2(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg2ei32_v_u16mf4x2(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16mf2x2(uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg2ei32_v_u16mf2x2(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16m1x2(uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg2ei32_v_u16m1x2(uint16_t *base, vuint32m2_t bindex, vuint16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16m2x2(uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg2ei32_v_u16m2x2(uint16_t *base, vuint32m4_t bindex, vuint16m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16m4x2(uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg2ei32_v_u16m4x2(uint16_t *base, vuint32m8_t bindex, vuint16m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32mf2x2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg2ei32_v_u32mf2x2(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m1x2(uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg2ei32_v_u32m1x2(uint32_t *base, vuint32m1_t bindex, vuint32m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m2x2(uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg2ei32_v_u32m2x2(uint32_t *base, vuint32m2_t bindex, vuint32m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m4x2(uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg2ei32_v_u32m4x2(uint32_t *base, vuint32m4_t bindex, vuint32m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u64m1x2(uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg2ei32_v_u64m1x2(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u64m2x2(uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg2ei32_v_u64m2x2(uint64_t *base, vuint32m1_t bindex, vuint64m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u64m4x2(uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg2ei32_v_u64m4x2(uint64_t *base, vuint32m2_t bindex, vuint64m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg2ei32_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg2ei32_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg2ei32_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg2ei32_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg2ei32_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint32m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32mf2x2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg2ei32_v_f32mf2x2_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m1x2_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg2ei32_v_f32m1x2_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m2x2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg2ei32_v_f32m2x2_m(vbool16_t mask, float *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m4x2_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg2ei32_v_f32m4x2_m(vbool8_t mask, float *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m1x2_m // CHECK-RV64-SAME: 
( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f64m1x2_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg2ei32_v_f64m1x2_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f64m2x2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg2ei32_v_f64m2x2_m(vbool32_t mask, double *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f64m4x2_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg2ei32_v_f64m4x2_m(vbool16_t mask, double *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t 
vl) { @@ -600,7 +600,7 @@ void test_vsoxseg2ei32_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg2ei32_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg2ei32_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg2ei32_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg2ei32_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint32m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg2ei32_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg2ei32_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg2ei32_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg2ei32_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg2ei32_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint32m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg2ei32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg2ei32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg2ei32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg2ei32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg2ei32_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsoxseg2ei32_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg2ei32_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsoxseg2ei32_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsoxseg2ei32_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsoxseg2ei32_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf2x2_m(vbool16_t mask, 
uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsoxseg2ei32_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsoxseg2ei32_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsoxseg2ei32_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsoxseg2ei32_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -830,7 +830,7 @@ void test_vsoxseg2ei32_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -840,7 +840,7 @@ void test_vsoxseg2ei32_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -850,7 +850,7 @@ void test_vsoxseg2ei32_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -860,7 +860,7 @@ void test_vsoxseg2ei32_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint32m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -870,7 +870,7 @@ void test_vsoxseg2ei32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -880,7 +880,7 @@ void test_vsoxseg2ei32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -890,7 +890,7 @@ void test_vsoxseg2ei32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -900,7 +900,7 @@ void test_vsoxseg2ei32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m1x2_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -910,7 +910,7 @@ void test_vsoxseg2ei32_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -920,7 +920,7 @@ void test_vsoxseg2ei32_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei64.c index 065a9e2170153..f378c2a2fd230 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16mf4x2(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg2ei64_v_f16mf4x2(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16mf2x2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg2ei64_v_f16mf2x2(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16m1x2(_Float16 *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg2ei64_v_f16m1x2(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16m2x2(_Float16 *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg2ei64_v_f16m2x2(_Float16 *base, vuint64m8_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32mf2x2(float *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg2ei64_v_f32mf2x2(float *base, vuint64m1_t bindex, vfloat32mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m1x2(float *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg2ei64_v_f32m1x2(float *base, vuint64m2_t bindex, vfloat32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m2x2(float *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg2ei64_v_f32m2x2(float *base, vuint64m4_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m4x2(float *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg2ei64_v_f32m4x2(float *base, vuint64m8_t bindex, vfloat32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m1x2(double *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg2ei64_v_f64m1x2(double *base, vuint64m1_t bindex, vfloat64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m2x2(double *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg2ei64_v_f64m2x2(double *base, vuint64m2_t bindex, vfloat64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m4x2(double *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg2ei64_v_f64m4x2(double *base, vuint64m4_t bindex, vfloat64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf8x2(int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg2ei64_v_i8mf8x2(int8_t *base, vuint64m1_t bindex, vint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf4x2(int8_t *base, 
vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg2ei64_v_i8mf4x2(int8_t *base, vuint64m2_t bindex, vint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf2x2(int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg2ei64_v_i8mf2x2(int8_t *base, vuint64m4_t bindex, vint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8m1x2(int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg2ei64_v_i8m1x2(int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16mf4x2(int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg2ei64_v_i16mf4x2(int16_t *base, vuint64m1_t bindex, vint16mf4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16mf2x2(int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg2ei64_v_i16mf2x2(int16_t *base, 
vuint64m2_t bindex, vint16mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16m1x2(int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg2ei64_v_i16m1x2(int16_t *base, vuint64m4_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16m2x2(int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg2ei64_v_i16m2x2(int16_t *base, vuint64m8_t bindex, vint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32mf2x2(int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg2ei64_v_i32mf2x2(int32_t *base, vuint64m1_t bindex, vint32mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32m1x2(int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg2ei64_v_i32m1x2(int32_t *base, vuint64m2_t bindex, vint32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m2x2 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32m2x2(int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg2ei64_v_i32m2x2(int32_t *base, vuint64m4_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32m4x2(int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg2ei64_v_i32m4x2(int32_t *base, vuint64m8_t bindex, vint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m1x2(int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg2ei64_v_i64m1x2(int64_t *base, vuint64m1_t bindex, vint64m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m2x2(int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg2ei64_v_i64m2x2(int64_t *base, vuint64m2_t bindex, vint64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m4x2(int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg2ei64_v_i64m4x2(int64_t *base, vuint64m4_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf8x2(uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg2ei64_v_u8mf8x2(uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf4x2(uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg2ei64_v_u8mf4x2(uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf2x2(uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg2ei64_v_u8mf2x2(uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8m1x2(uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg2ei64_v_u8m1x2(uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16mf4x2(uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg2ei64_v_u16mf4x2(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16mf2x2(uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg2ei64_v_u16mf2x2(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16m1x2(uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg2ei64_v_u16m1x2(uint16_t *base, vuint64m4_t bindex, vuint16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16m2x2(uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg2ei64_v_u16m2x2(uint16_t *base, vuint64m8_t bindex, vuint16m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32mf2x2(uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg2ei64_v_u32mf2x2(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32m1x2(uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg2ei64_v_u32m1x2(uint32_t *base, vuint64m2_t bindex, vuint32m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32m2x2(uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg2ei64_v_u32m2x2(uint32_t *base, vuint64m4_t bindex, vuint32m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32m4x2(uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg2ei64_v_u32m4x2(uint32_t *base, vuint64m8_t bindex, vuint32m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u64m1x2(uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg2ei64_v_u64m1x2(uint64_t *base, vuint64m1_t bindex, vuint64m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u64m2x2(uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg2ei64_v_u64m2x2(uint64_t *base, vuint64m2_t bindex, vuint64m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u64m4x2(uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg2ei64_v_u64m4x2(uint64_t *base, vuint64m4_t bindex, vuint64m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg2ei64_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg2ei64_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg2ei64_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg2ei64_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32mf2x2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg2ei64_v_f32mf2x2_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m1x2_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg2ei64_v_f32m1x2_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m2x2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg2ei64_v_f32m2x2_m(vbool16_t mask, float *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m4x2_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg2ei64_v_f32m4x2_m(vbool8_t mask, float *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m1x2_m // CHECK-RV64-SAME: 
( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m1x2_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg2ei64_v_f64m1x2_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m2x2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg2ei64_v_f64m2x2_m(vbool32_t mask, double *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m4x2_m(vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg2ei64_v_f64m4x2_m(vbool16_t mask, double *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t 
vl) { @@ -540,7 +540,7 @@ void test_vsoxseg2ei64_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg2ei64_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg2ei64_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg2ei64_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg2ei64_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg2ei64_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg2ei64_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg2ei64_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg2ei64_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg2ei64_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg2ei64_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg2ei64_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg2ei64_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg2ei64_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg2ei64_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg2ei64_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg2ei64_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg2ei64_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg2ei64_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg2ei64_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16mf4x2_m(vbool64_t mask, 
uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg2ei64_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg2ei64_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsoxseg2ei64_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsoxseg2ei64_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsoxseg2ei64_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsoxseg2ei64_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsoxseg2ei64_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsoxseg2ei64_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6)
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
@@ -810,7 +810,7 @@ void test_vsoxseg2ei64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint64m1_t b
// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m2x2_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6)
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
@@ -820,7 +820,7 @@ void test_vsoxseg2ei64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint64m2_t b
// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m4x2_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6)
// CHECK-RV64-NEXT: ret void
//
void test_vsoxseg2ei64_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei8.c
index 062943ec7a35b..d0229ef3ed21f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei8.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf4x2
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void
test_vsoxseg2ei8_v_f16mf4x2(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg2ei8_v_f16mf4x2(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16mf2x2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg2ei8_v_f16mf2x2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m1x2(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg2ei8_v_f16m1x2(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m2x2(_Float16 *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg2ei8_v_f16m2x2(_Float16 *base, vuint8m1_t bindex, vfloat16m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m4x2(_Float16 *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void 
test_vsoxseg2ei8_v_f16m4x2(_Float16 *base, vuint8m2_t bindex, vfloat16m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32mf2x2(float *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg2ei8_v_f32mf2x2(float *base, vuint8mf8_t bindex, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32m1x2(float *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg2ei8_v_f32m1x2(float *base, vuint8mf4_t bindex, vfloat32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32m2x2(float *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg2ei8_v_f32m2x2(float *base, vuint8mf2_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32m4x2(float *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg2ei8_v_f32m4x2(float *base, vuint8m1_t bindex, vfloat32m4x2_t v // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg2ei8_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m1x2(double *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg2ei8_v_f64m1x2(double *base, vuint8mf8_t bindex, vfloat64m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m2x2(double *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg2ei8_v_f64m2x2(double *base, vuint8mf4_t bindex, vfloat64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m4x2(double *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg2ei8_v_f64m4x2(double *base, vuint8mf2_t bindex, vfloat64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf8x2(int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg2ei8_v_i8mf8x2(int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) 
[[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf4x2(int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg2ei8_v_i8mf4x2(int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf2x2(int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg2ei8_v_i8mf2x2(int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8m1x2(int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg2ei8_v_i8m1x2(int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8m2x2(int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg2ei8_v_i8m2x2(int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8m4x2(int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg2ei8_v_i8m4x2(int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16mf4x2(int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg2ei8_v_i16mf4x2(int16_t *base, vuint8mf8_t bindex, vint16mf4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16mf2x2(int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg2ei8_v_i16mf2x2(int16_t *base, vuint8mf4_t bindex, vint16mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16m1x2(int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg2ei8_v_i16m1x2(int16_t *base, vuint8mf2_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16m2x2(int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg2ei8_v_i16m2x2(int16_t *base, vuint8m1_t bindex, vint16m2x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16m4x2(int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg2ei8_v_i16m4x2(int16_t *base, vuint8m2_t bindex, vint16m4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32mf2x2(int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg2ei8_v_i32mf2x2(int32_t *base, vuint8mf8_t bindex, vint32mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m1x2(int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg2ei8_v_i32m1x2(int32_t *base, vuint8mf4_t bindex, vint32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m2x2(int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg2ei8_v_i32m2x2(int32_t *base, vuint8mf2_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m4x2(int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg2ei8_v_i32m4x2(int32_t *base, vuint8m1_t bindex, vint32m4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m1x2(int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg2ei8_v_i64m1x2(int64_t *base, vuint8mf8_t bindex, vint64m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m2x2(int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg2ei8_v_i64m2x2(int64_t *base, vuint8mf4_t bindex, vint64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m4x2(int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg2ei8_v_i64m4x2(int64_t *base, vuint8mf2_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf8x2(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg2ei8_v_u8mf8x2(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf4x2(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg2ei8_v_u8mf4x2(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf2x2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg2ei8_v_u8mf2x2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m1x2(uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, 
size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg2ei8_v_u8m1x2(uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m2x2(uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg2ei8_v_u8m2x2(uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m4x2(uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg2ei8_v_u8m4x2(uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16mf4x2(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg2ei8_v_u16mf4x2(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16mf2x2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg2ei8_v_u16mf2x2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m1x2(uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg2ei8_v_u16m1x2(uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m2x2(uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg2ei8_v_u16m2x2(uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m4x2(uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg2ei8_v_u16m4x2(uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32mf2x2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg2ei8_v_u32mf2x2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m1x2(uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg2ei8_v_u32m1x2(uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m2x2(uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg2ei8_v_u32m2x2(uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m4x2(uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg2ei8_v_u32m4x2(uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u64m1x2(uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg2ei8_v_u64m1x2(uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u64m2x2(uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg2ei8_v_u64m2x2(uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u64m4x2(uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg2ei8_v_u64m4x2(uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg2ei8_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg2ei8_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg2ei8_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg2ei8_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg2ei8_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint8m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32mf2x2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg2ei8_v_f32mf2x2_m(vbool64_t mask, float *base, vuint8mf8_t bind // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32m1x2_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg2ei8_v_f32m1x2_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32m2x2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg2ei8_v_f32m2x2_m(vbool16_t mask, float *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32m4x2_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg2ei8_v_f32m4x2_m(vbool8_t mask, float *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m1x2_m(vbool64_t 
mask, double *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg2ei8_v_f64m1x2_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m2x2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg2ei8_v_f64m2x2_m(vbool32_t mask, double *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m4x2_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg2ei8_v_f64m4x2_m(vbool16_t mask, double *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg2ei8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg2ei8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg2ei8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg2ei8_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg2ei8_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg2ei8_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg2ei8_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg2ei8_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg2ei8_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg2ei8_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg2ei8_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint8m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg2ei8_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void 
test_vsoxseg2ei8_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsoxseg2ei8_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsoxseg2ei8_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsoxseg2ei8_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], 
i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsoxseg2ei8_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsoxseg2ei8_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsoxseg2ei8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsoxseg2ei8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsoxseg2ei8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -830,7 +830,7 @@ void test_vsoxseg2ei8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -840,7 +840,7 @@ void test_vsoxseg2ei8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -850,7 +850,7 @@ void test_vsoxseg2ei8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -860,7 +860,7 @@ void test_vsoxseg2ei8_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -870,7 +870,7 @@ void test_vsoxseg2ei8_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -880,7 +880,7 @@ void test_vsoxseg2ei8_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -890,7 +890,7 @@ void test_vsoxseg2ei8_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m4x2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -900,7 +900,7 @@ void test_vsoxseg2ei8_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint8m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -910,7 +910,7 @@ void test_vsoxseg2ei8_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -920,7 +920,7 @@ void test_vsoxseg2ei8_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { 
@@ -930,7 +930,7 @@ void test_vsoxseg2ei8_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -940,7 +940,7 @@ void test_vsoxseg2ei8_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -950,7 +950,7 @@ void test_vsoxseg2ei8_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -960,7 +960,7 @@ void test_vsoxseg2ei8_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei16.c index 1b37047f28c1a..56a1c21958fa6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16mf4x3(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg3ei16_v_f16mf4x3(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16mf2x3(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg3ei16_v_f16mf2x3(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16m1x3(_Float16 *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg3ei16_v_f16m1x3(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16m2x3(_Float16 *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg3ei16_v_f16m2x3(_Float16 *base, vuint16m2_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32mf2x3(float *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg3ei16_v_f32mf2x3(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32m1x3(float *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg3ei16_v_f32m1x3(float *base, vuint16mf2_t bindex, vfloat32m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32m2x3(float *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg3ei16_v_f32m2x3(float *base, vuint16m1_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f64m1x3(double *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg3ei16_v_f64m1x3(double *base, vuint16mf4_t bindex, vfloat64m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f64m2x3(double *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg3ei16_v_f64m2x3(double *base, vuint16mf2_t bindex, vfloat64m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8mf8x3(int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg3ei16_v_i8mf8x3(int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8mf4x3(int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg3ei16_v_i8mf4x3(int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8mf2x3(int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg3ei16_v_i8mf2x3(int8_t *base, vuint16m1_t bindex, vint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8m1x3(int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg3ei16_v_i8m1x3(int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8m2x3(int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg3ei16_v_i8m2x3(int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16mf4x3(int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg3ei16_v_i16mf4x3(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16mf2x3(int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg3ei16_v_i16mf2x3(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16m1x3(int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg3ei16_v_i16m1x3(int16_t *base, vuint16m1_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16m2x3(int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg3ei16_v_i16m2x3(int16_t *base, vuint16m2_t bindex, vint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32mf2x3(int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg3ei16_v_i32mf2x3(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32m1x3(int32_t *base, 
vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg3ei16_v_i32m1x3(int32_t *base, vuint16mf2_t bindex, vint32m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32m2x3(int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg3ei16_v_i32m2x3(int32_t *base, vuint16m1_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i64m1x3(int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg3ei16_v_i64m1x3(int64_t *base, vuint16mf4_t bindex, vint64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i64m2x3(int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg3ei16_v_i64m2x3(int64_t *base, vuint16mf2_t bindex, vint64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8mf8x3(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void 
test_vsoxseg3ei16_v_u8mf8x3(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8mf4x3(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg3ei16_v_u8mf4x3(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8mf2x3(uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg3ei16_v_u8mf2x3(uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8m1x3(uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg3ei16_v_u8m1x3(uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8m2x3(uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg3ei16_v_u8m2x3(uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg3ei16_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16mf4x3(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg3ei16_v_u16mf4x3(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16mf2x3(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg3ei16_v_u16mf2x3(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16m1x3(uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg3ei16_v_u16m1x3(uint16_t *base, vuint16m1_t bindex, vuint16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16m2x3(uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg3ei16_v_u16m2x3(uint16_t *base, vuint16m2_t bindex, vuint16m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u32mf2x3(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg3ei16_v_u32mf2x3(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u32m1x3(uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg3ei16_v_u32m1x3(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u32m2x3(uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg3ei16_v_u32m2x3(uint32_t *base, vuint16m1_t bindex, vuint32m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u64m1x3(uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg3ei16_v_u64m1x3(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u64m2x3(uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg3ei16_v_u64m2x3(uint64_t *base, vuint16mf2_t bindex, vuint64m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg3ei16_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg3ei16_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg3ei16_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m2x3_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg3ei16_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32mf2x3_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg3ei16_v_f32mf2x3_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32m1x3_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg3ei16_v_f32m1x3_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32m2x3_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, 
size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg3ei16_v_f32m2x3_m(vbool16_t mask, float *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f64m1x3_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg3ei16_v_f64m1x3_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f64m2x3_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg3ei16_v_f64m2x3_m(vbool32_t mask, double *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg3ei16_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg3ei16_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg3ei16_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg3ei16_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg3ei16_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint16m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg3ei16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg3ei16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg3ei16_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg3ei16_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg3ei16_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg3ei16_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg3ei16_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg3ei16_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg3ei16_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg3ei16_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg3ei16_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg3ei16_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg3ei16_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg3ei16_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg3ei16_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg3ei16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg3ei16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg3ei16_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg3ei16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg3ei16_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg3ei16_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg3ei16_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg3ei16_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei32.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei32.c index a28fcd5801af6..8837a8ad83d41 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16mf4x3(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg3ei32_v_f16mf4x3(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16mf2x3(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg3ei32_v_f16mf2x3(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16m1x3(_Float16 *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg3ei32_v_f16m1x3(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], 
i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16m2x3(_Float16 *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg3ei32_v_f16m2x3(_Float16 *base, vuint32m4_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f32mf2x3(float *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg3ei32_v_f32mf2x3(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f32m1x3(float *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg3ei32_v_f32m1x3(float *base, vuint32m1_t bindex, vfloat32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f32m2x3(float *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg3ei32_v_f32m2x3(float *base, vuint32m2_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f64m1x3(double *base, vuint32mf2_t bindex, vfloat64m1x3_t 
v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg3ei32_v_f64m1x3(double *base, vuint32mf2_t bindex, vfloat64m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f64m2x3(double *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg3ei32_v_f64m2x3(double *base, vuint32m1_t bindex, vfloat64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf8x3(int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg3ei32_v_i8mf8x3(int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf4x3(int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg3ei32_v_i8mf4x3(int8_t *base, vuint32m1_t bindex, vint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf2x3(int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg3ei32_v_i8mf2x3(int8_t *base, vuint32m2_t bindex, vint8mf2x3_t // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8m1x3(int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg3ei32_v_i8m1x3(int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8m2x3(int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg3ei32_v_i8m2x3(int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16mf4x3(int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg3ei32_v_i16mf4x3(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16mf2x3(int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg3ei32_v_i16mf2x3(int16_t *base, vuint32m1_t bindex, vint16mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16m1x3(int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg3ei32_v_i16m1x3(int16_t *base, vuint32m2_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16m2x3(int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg3ei32_v_i16m2x3(int16_t *base, vuint32m4_t bindex, vint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32mf2x3(int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg3ei32_v_i32mf2x3(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32m1x3(int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg3ei32_v_i32m1x3(int32_t *base, vuint32m1_t bindex, vint32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32m2x3(int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg3ei32_v_i32m2x3(int32_t *base, vuint32m2_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i64m1x3(int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg3ei32_v_i64m1x3(int64_t *base, vuint32mf2_t bindex, vint64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i64m2x3(int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg3ei32_v_i64m2x3(int64_t *base, vuint32m1_t bindex, vint64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf8x3(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg3ei32_v_u8mf8x3(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf4x3(uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg3ei32_v_u8mf4x3(uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf2x3(uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg3ei32_v_u8mf2x3(uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8m1x3(uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg3ei32_v_u8m1x3(uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8m2x3(uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg3ei32_v_u8m2x3(uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16mf4x3(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg3ei32_v_u16mf4x3(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16mf2x3(uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg3ei32_v_u16mf2x3(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16m1x3(uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg3ei32_v_u16m1x3(uint16_t *base, vuint32m2_t bindex, vuint16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16m2x3(uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg3ei32_v_u16m2x3(uint16_t *base, vuint32m4_t bindex, vuint16m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32mf2x3(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg3ei32_v_u32mf2x3(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32m1x3(uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg3ei32_v_u32m1x3(uint32_t *base, vuint32m1_t bindex, vuint32m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32m2x3(uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg3ei32_v_u32m2x3(uint32_t *base, vuint32m2_t bindex, vuint32m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u64m1x3(uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg3ei32_v_u64m1x3(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u64m2x3(uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg3ei32_v_u64m2x3(uint64_t *base, vuint32m1_t bindex, vuint64m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg3ei32_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg3ei32_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg3ei32_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg3ei32_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f32mf2x3_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg3ei32_v_f32mf2x3_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f32m1x3_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg3ei32_v_f32m1x3_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f32m2x3_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg3ei32_v_f32m2x3_m(vbool16_t mask, float *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f64m1x3_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg3ei32_v_f64m1x3_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f64m2x3_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg3ei32_v_f64m2x3_m(vbool32_t mask, double *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg3ei32_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg3ei32_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg3ei32_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg3ei32_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg3ei32_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint32m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg3ei32_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg3ei32_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg3ei32_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg3ei32_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg3ei32_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg3ei32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg3ei32_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg3ei32_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg3ei32_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg3ei32_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg3ei32_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg3ei32_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg3ei32_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m1x3_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg3ei32_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg3ei32_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg3ei32_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, 
size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg3ei32_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg3ei32_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg3ei32_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg3ei32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg3ei32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg3ei32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg3ei32_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei64.c index 43bc95f659687..10573d1f29203 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg3ei64_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16mf4x3(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg3ei64_v_f16mf4x3(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16mf2x3(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg3ei64_v_f16mf2x3(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16m1x3(_Float16 *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg3ei64_v_f16m1x3(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16m2x3(_Float16 *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg3ei64_v_f16m2x3(_Float16 *base, vuint64m8_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f32mf2x3(float *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg3ei64_v_f32mf2x3(float *base, vuint64m1_t bindex, vfloat32mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f32m1x3(float *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg3ei64_v_f32m1x3(float *base, vuint64m2_t bindex, vfloat32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f32m2x3(float *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg3ei64_v_f32m2x3(float *base, vuint64m4_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f64m1x3(double *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg3ei64_v_f64m1x3(double *base, vuint64m1_t bindex, vfloat64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f64m2x3(double *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg3ei64_v_f64m2x3(double *base, vuint64m2_t bindex, vfloat64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf8x3(int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg3ei64_v_i8mf8x3(int8_t *base, vuint64m1_t bindex, vint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf4x3(int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg3ei64_v_i8mf4x3(int8_t *base, vuint64m2_t bindex, vint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf2x3(int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg3ei64_v_i8mf2x3(int8_t *base, vuint64m4_t bindex, vint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8m1x3(int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg3ei64_v_i8m1x3(int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16mf4x3(int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg3ei64_v_i16mf4x3(int16_t *base, vuint64m1_t bindex, vint16mf4x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16mf2x3(int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg3ei64_v_i16mf2x3(int16_t *base, vuint64m2_t bindex, vint16mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16m1x3(int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg3ei64_v_i16m1x3(int16_t *base, vuint64m4_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16m2x3(int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg3ei64_v_i16m2x3(int16_t *base, vuint64m8_t bindex, vint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32mf2x3(int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg3ei64_v_i32mf2x3(int32_t *base, vuint64m1_t bindex, vint32mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32m1x3(int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg3ei64_v_i32m1x3(int32_t *base, vuint64m2_t bindex, vint32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32m2x3(int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg3ei64_v_i32m2x3(int32_t *base, vuint64m4_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i64m1x3(int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg3ei64_v_i64m1x3(int64_t *base, vuint64m1_t bindex, vint64m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i64m2x3(int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg3ei64_v_i64m2x3(int64_t *base, vuint64m2_t bindex, vint64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf8x3(uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg3ei64_v_u8mf8x3(uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf4x3(uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg3ei64_v_u8mf4x3(uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf2x3(uint8_t 
*base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg3ei64_v_u8mf2x3(uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8m1x3(uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg3ei64_v_u8m1x3(uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16mf4x3(uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg3ei64_v_u16mf4x3(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16mf2x3(uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg3ei64_v_u16mf2x3(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16m1x3(uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void 
test_vsoxseg3ei64_v_u16m1x3(uint16_t *base, vuint64m4_t bindex, vuint16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16m2x3(uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg3ei64_v_u16m2x3(uint16_t *base, vuint64m8_t bindex, vuint16m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u32mf2x3(uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg3ei64_v_u32mf2x3(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u32m1x3(uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg3ei64_v_u32m1x3(uint32_t *base, vuint64m2_t bindex, vuint32m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u32m2x3(uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg3ei64_v_u32m2x3(uint32_t *base, vuint64m4_t bindex, vuint32m2x3 // CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg3ei64_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u64m1x3(uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg3ei64_v_u64m1x3(uint64_t *base, vuint64m1_t bindex, vuint64m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u64m2x3(uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg3ei64_v_u64m2x3(uint64_t *base, vuint64m2_t bindex, vuint64m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg3ei64_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg3ei64_v_f16mf2x3_m(vbool32_t mask, 
_Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg3ei64_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg3ei64_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f32mf2x3_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg3ei64_v_f32mf2x3_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: 
ret void // void test_vsoxseg3ei64_v_f32m1x3_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg3ei64_v_f32m1x3_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f32m2x3_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg3ei64_v_f32m2x3_m(vbool16_t mask, float *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f64m1x3_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg3ei64_v_f64m1x3_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f64m2x3_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg3ei64_v_f64m2x3_m(vbool32_t mask, double *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg3ei64_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg3ei64_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg3ei64_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg3ei64_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg3ei64_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg3ei64_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg3ei64_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg3ei64_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32mf2x3_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg3ei64_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg3ei64_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg3ei64_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { 
@@ -570,7 +570,7 @@ void test_vsoxseg3ei64_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg3ei64_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg3ei64_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg3ei64_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg3ei64_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg3ei64_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg3ei64_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg3ei64_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg3ei64_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg3ei64_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg3ei64_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg3ei64_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg3ei64_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg3ei64_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei8.c index 6eca72fd39d76..b7aa2151a679a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16mf4x3(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg3ei8_v_f16mf4x3(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16mf2x3(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg3ei8_v_f16mf2x3(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16m1x3(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg3ei8_v_f16m1x3(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16m2x3(_Float16 *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg3ei8_v_f16m2x3(_Float16 *base, vuint8m1_t bindex, vfloat16m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32mf2x3(float *base, vuint8mf8_t 
bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg3ei8_v_f32mf2x3(float *base, vuint8mf8_t bindex, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32m1x3(float *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg3ei8_v_f32m1x3(float *base, vuint8mf4_t bindex, vfloat32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32m2x3(float *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg3ei8_v_f32m2x3(float *base, vuint8mf2_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f64m1x3(double *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg3ei8_v_f64m1x3(double *base, vuint8mf8_t bindex, vfloat64m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f64m2x3(double *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg3ei8_v_f64m2x3(double *base, vuint8mf4_t bindex, 
vfloat64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf8x3(int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg3ei8_v_i8mf8x3(int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf4x3(int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg3ei8_v_i8mf4x3(int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf2x3(int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg3ei8_v_i8mf2x3(int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8m1x3(int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg3ei8_v_i8m1x3(int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8m2x3(int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg3ei8_v_i8m2x3(int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16mf4x3(int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg3ei8_v_i16mf4x3(int16_t *base, vuint8mf8_t bindex, vint16mf4x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16mf2x3(int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg3ei8_v_i16mf2x3(int16_t *base, vuint8mf4_t bindex, vint16mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16m1x3(int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg3ei8_v_i16m1x3(int16_t *base, vuint8mf2_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16m2x3(int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg3ei8_v_i16m2x3(int16_t *base, vuint8m1_t bindex, vint16m2x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32mf2x3(int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg3ei8_v_i32mf2x3(int32_t *base, vuint8mf8_t bindex, vint32mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32m1x3(int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg3ei8_v_i32m1x3(int32_t *base, vuint8mf4_t bindex, vint32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32m2x3(int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg3ei8_v_i32m2x3(int32_t *base, vuint8mf2_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i64m1x3(int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg3ei8_v_i64m1x3(int64_t *base, vuint8mf8_t bindex, vint64m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i64m2x3(int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg3ei8_v_i64m2x3(int64_t *base, vuint8mf4_t bindex, vint64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf8x3(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg3ei8_v_u8mf8x3(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf4x3(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg3ei8_v_u8mf4x3(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf2x3(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg3ei8_v_u8mf2x3(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8m1x3(uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg3ei8_v_u8m1x3(uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8m2x3(uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg3ei8_v_u8m2x3(uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16mf4x3(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg3ei8_v_u16mf4x3(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16mf2x3(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg3ei8_v_u16mf2x3(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16m1x3(uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg3ei8_v_u16m1x3(uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16m2x3(uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg3ei8_v_u16m2x3(uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u32mf2x3(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg3ei8_v_u32mf2x3(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u32m1x3(uint32_t *base, vuint8mf4_t 
bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg3ei8_v_u32m1x3(uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u32m2x3(uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg3ei8_v_u32m2x3(uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u64m1x3(uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg3ei8_v_u64m1x3(uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u64m2x3(uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg3ei8_v_u64m2x3(uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { 
@@ -390,7 +390,7 @@ void test_vsoxseg3ei8_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg3ei8_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg3ei8_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg3ei8_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32mf2x3_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg3ei8_v_f32mf2x3_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32m1x3_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg3ei8_v_f32m1x3_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32m2x3_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg3ei8_v_f32m2x3_m(vbool16_t mask, float *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f64m1x3_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg3ei8_v_f64m1x3_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f64m2x3_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg3ei8_v_f64m2x3_m(vbool32_t mask, double *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg3ei8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg3ei8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg3ei8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg3ei8_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg3ei8_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg3ei8_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg3ei8_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m1x3_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg3ei8_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg3ei8_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg3ei8_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t 
vl) { @@ -580,7 +580,7 @@ void test_vsoxseg3ei8_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg3ei8_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg3ei8_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg3ei8_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg3ei8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg3ei8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg3ei8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg3ei8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg3ei8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg3ei8_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg3ei8_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg3ei8_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg3ei8_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg3ei8_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg3ei8_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg3ei8_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m1x3_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg3ei8_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei16.c index 1c26521c9bbde..06fce8976a82d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16mf4x4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg4ei16_v_f16mf4x4(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16mf2x4(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg4ei16_v_f16mf2x4(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16m1x4(_Float16 *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg4ei16_v_f16m1x4(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16m2x4(_Float16 *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg4ei16_v_f16m2x4(_Float16 *base, vuint16m2_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f32mf2x4(float *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg4ei16_v_f32mf2x4(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f32m1x4(float *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg4ei16_v_f32m1x4(float *base, vuint16mf2_t bindex, vfloat32m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f32m2x4(float *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg4ei16_v_f32m2x4(float *base, vuint16m1_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f64m1x4(double *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg4ei16_v_f64m1x4(double *base, vuint16mf4_t bindex, vfloat64m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f64m2x4(double *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg4ei16_v_f64m2x4(double *base, vuint16mf2_t bindex, vfloat64m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf8x4(int8_t 
*base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg4ei16_v_i8mf8x4(int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf4x4(int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg4ei16_v_i8mf4x4(int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf2x4(int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg4ei16_v_i8mf2x4(int8_t *base, vuint16m1_t bindex, vint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8m1x4(int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg4ei16_v_i8m1x4(int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8m2x4(int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg4ei16_v_i8m2x4(int8_t *base, 
vuint16m4_t bindex, vint8m2x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16mf4x4(int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg4ei16_v_i16mf4x4(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16mf2x4(int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg4ei16_v_i16mf2x4(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16m1x4(int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg4ei16_v_i16m1x4(int16_t *base, vuint16m1_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16m2x4(int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg4ei16_v_i16m2x4(int16_t *base, vuint16m2_t bindex, vint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32mf2x4 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32mf2x4(int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg4ei16_v_i32mf2x4(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32m1x4(int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg4ei16_v_i32m1x4(int32_t *base, vuint16mf2_t bindex, vint32m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32m2x4(int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg4ei16_v_i32m2x4(int32_t *base, vuint16m1_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i64m1x4(int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg4ei16_v_i64m1x4(int64_t *base, vuint16mf4_t bindex, vint64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i64m2x4(int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg4ei16_v_i64m2x4(int64_t *base, vuint16mf2_t bindex, vint64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf8x4(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg4ei16_v_u8mf8x4(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf4x4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg4ei16_v_u8mf4x4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf2x4(uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg4ei16_v_u8mf2x4(uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8m1x4(uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg4ei16_v_u8m1x4(uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8m2x4(uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg4ei16_v_u8m2x4(uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16mf4x4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg4ei16_v_u16mf4x4(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16mf2x4(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg4ei16_v_u16mf2x4(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16m1x4(uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg4ei16_v_u16m1x4(uint16_t *base, vuint16m1_t bindex, vuint16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16m2x4(uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg4ei16_v_u16m2x4(uint16_t *base, vuint16m2_t bindex, vuint16m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32mf2x4(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg4ei16_v_u32mf2x4(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32m1x4(uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg4ei16_v_u32m1x4(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32m2x4(uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg4ei16_v_u32m2x4(uint32_t *base, vuint16m1_t bindex, vuint32m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u64m1x4(uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg4ei16_v_u64m1x4(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u64m2x4(uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg4ei16_v_u64m2x4(uint64_t *base, vuint16mf2_t bindex, vuint64m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg4ei16_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg4ei16_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg4ei16_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg4ei16_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f32mf2x4_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg4ei16_v_f32mf2x4_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f32m1x4_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg4ei16_v_f32m1x4_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f32m2x4_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg4ei16_v_f32m2x4_m(vbool16_t mask, float *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f64m1x4_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg4ei16_v_f64m1x4_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f64m2x4_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg4ei16_v_f64m2x4_m(vbool32_t mask, double *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg4ei16_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg4ei16_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg4ei16_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg4ei16_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8m1x4_m(vbool8_t mask, int8_t *base, 
vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg4ei16_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg4ei16_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint16m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg4ei16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg4ei16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg4ei16_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg4ei16_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg4ei16_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg4ei16_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg4ei16_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg4ei16_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg4ei16_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg4ei16_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf4x4_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg4ei16_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg4ei16_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg4ei16_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) { 
@@ -660,7 +660,7 @@ void test_vsoxseg4ei16_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg4ei16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg4ei16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg4ei16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg4ei16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg4ei16_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg4ei16_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg4ei16_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg4ei16_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei32.c index fb6eb364cf34b..0e0d114418be1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16mf4x4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg4ei32_v_f16mf4x4(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16mf2x4(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg4ei32_v_f16mf2x4(_Float16 *base, vuint32m1_t bindex, 
vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16m1x4(_Float16 *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg4ei32_v_f16m1x4(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16m2x4(_Float16 *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg4ei32_v_f16m2x4(_Float16 *base, vuint32m4_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f32mf2x4(float *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg4ei32_v_f32mf2x4(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f32m1x4(float *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg4ei32_v_f32m1x4(float *base, vuint32m1_t bindex, vfloat32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f32m2x4(float *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg4ei32_v_f32m2x4(float *base, vuint32m2_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f64m1x4(double *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg4ei32_v_f64m1x4(double *base, vuint32mf2_t bindex, vfloat64m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f64m2x4(double *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg4ei32_v_f64m2x4(double *base, vuint32m1_t bindex, vfloat64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf8x4(int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg4ei32_v_i8mf8x4(int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf4x4(int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg4ei32_v_i8mf4x4(int8_t *base, vuint32m1_t bindex, vint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf2x4(int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg4ei32_v_i8mf2x4(int8_t *base, vuint32m2_t bindex, vint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8m1x4(int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg4ei32_v_i8m1x4(int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8m2x4(int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg4ei32_v_i8m2x4(int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16mf4x4(int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg4ei32_v_i16mf4x4(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16mf2x4(int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg4ei32_v_i16mf2x4(int16_t *base, vuint32m1_t bindex, vint16mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16m1x4(int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg4ei32_v_i16m1x4(int16_t *base, vuint32m2_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16m2x4(int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg4ei32_v_i16m2x4(int16_t *base, vuint32m4_t bindex, vint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32mf2x4(int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg4ei32_v_i32mf2x4(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32m1x4(int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg4ei32_v_i32m1x4(int32_t *base, vuint32m1_t bindex, vint32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32m2x4(int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg4ei32_v_i32m2x4(int32_t *base, vuint32m2_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i64m1x4(int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg4ei32_v_i64m1x4(int64_t *base, vuint32mf2_t bindex, vint64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i64m2x4(int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg4ei32_v_i64m2x4(int64_t *base, vuint32m1_t bindex, vint64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf8x4(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg4ei32_v_u8mf8x4(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf4x4(uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg4ei32_v_u8mf4x4(uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf2x4(uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg4ei32_v_u8mf2x4(uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8m1x4(uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg4ei32_v_u8m1x4(uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8m2x4(uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg4ei32_v_u8m2x4(uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16mf4x4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg4ei32_v_u16mf4x4(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16mf2x4(uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg4ei32_v_u16mf2x4(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg4ei32_v_u16m1x4(uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg4ei32_v_u16m1x4(uint16_t *base, vuint32m2_t bindex, vuint16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16m2x4(uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg4ei32_v_u16m2x4(uint16_t *base, vuint32m4_t bindex, vuint16m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32mf2x4(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg4ei32_v_u32mf2x4(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32m1x4(uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg4ei32_v_u32m1x4(uint32_t *base, vuint32m1_t bindex, vuint32m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32m2x4(uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ 
-360,7 +360,7 @@ void test_vsoxseg4ei32_v_u32m2x4(uint32_t *base, vuint32m2_t bindex, vuint32m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u64m1x4(uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg4ei32_v_u64m1x4(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u64m2x4(uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg4ei32_v_u64m2x4(uint64_t *base, vuint32m1_t bindex, vuint64m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg4ei32_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, 
vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg4ei32_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg4ei32_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg4ei32_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f32mf2x4_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg4ei32_v_f32mf2x4_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f32m1x4_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg4ei32_v_f32m1x4_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f32m2x4_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg4ei32_v_f32m2x4_m(vbool16_t mask, float *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f64m1x4_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg4ei32_v_f64m1x4_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f64m2x4_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg4ei32_v_f64m2x4_m(vbool32_t mask, double *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg4ei32_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg4ei32_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg4ei32_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg4ei32_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg4ei32_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint32m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg4ei32_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg4ei32_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -550,7 
+550,7 @@ void test_vsoxseg4ei32_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg4ei32_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg4ei32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg4ei32_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg4ei32_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg4ei32_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg4ei32_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg4ei32_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg4ei32_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg4ei32_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg4ei32_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg4ei32_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg4ei32_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg4ei32_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg4ei32_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg4ei32_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg4ei32_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg4ei32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg4ei32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg4ei32_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u64m1x4_m(vbool64_t 
mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg4ei32_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei64.c index 139e2f7e8ab91..0553f93d56d95 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16mf4x4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg4ei64_v_f16mf4x4(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16mf2x4(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg4ei64_v_f16mf2x4(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16m1x4(_Float16 *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg4ei64_v_f16m1x4(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16m2x4(_Float16 *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg4ei64_v_f16m2x4(_Float16 *base, vuint64m8_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32mf2x4(float *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg4ei64_v_f32mf2x4(float *base, vuint64m1_t bindex, vfloat32mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32m1x4(float *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg4ei64_v_f32m1x4(float *base, vuint64m2_t bindex, vfloat32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32m2x4(float *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg4ei64_v_f32m2x4(float *base, vuint64m4_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f64m1x4(double *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg4ei64_v_f64m1x4(double *base, vuint64m1_t bindex, vfloat64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f64m2x4(double *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg4ei64_v_f64m2x4(double *base, vuint64m2_t bindex, vfloat64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf8x4(int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg4ei64_v_i8mf8x4(int8_t *base, vuint64m1_t bindex, vint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf4x4(int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg4ei64_v_i8mf4x4(int8_t *base, vuint64m2_t bindex, vint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf2x4(int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg4ei64_v_i8mf2x4(int8_t *base, vuint64m4_t bindex, vint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8m1x4(int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg4ei64_v_i8m1x4(int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16mf4x4(int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg4ei64_v_i16mf4x4(int16_t *base, vuint64m1_t bindex, vint16mf4x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16mf2x4(int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg4ei64_v_i16mf2x4(int16_t *base, vuint64m2_t bindex, vint16mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16m1x4(int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg4ei64_v_i16m1x4(int16_t *base, vuint64m4_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16m2x4(int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg4ei64_v_i16m2x4(int16_t *base, vuint64m8_t bindex, vint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32mf2x4(int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg4ei64_v_i32mf2x4(int32_t *base, vuint64m1_t bindex, vint32mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32m1x4(int32_t *base, 
vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg4ei64_v_i32m1x4(int32_t *base, vuint64m2_t bindex, vint32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32m2x4(int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg4ei64_v_i32m2x4(int32_t *base, vuint64m4_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i64m1x4(int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg4ei64_v_i64m1x4(int64_t *base, vuint64m1_t bindex, vint64m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i64m2x4(int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg4ei64_v_i64m2x4(int64_t *base, vuint64m2_t bindex, vint64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf8x4(uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg4ei64_v_u8mf8x4(uint8_t 
*base, vuint64m1_t bindex, vuint8mf8x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf4x4(uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg4ei64_v_u8mf4x4(uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf2x4(uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg4ei64_v_u8mf2x4(uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8m1x4(uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg4ei64_v_u8m1x4(uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16mf4x4(uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg4ei64_v_u16mf4x4(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf2x4 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16mf2x4(uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg4ei64_v_u16mf2x4(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16m1x4(uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg4ei64_v_u16m1x4(uint16_t *base, vuint64m4_t bindex, vuint16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16m2x4(uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg4ei64_v_u16m2x4(uint16_t *base, vuint64m8_t bindex, vuint16m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32mf2x4(uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg4ei64_v_u32mf2x4(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) 
[[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32m1x4(uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg4ei64_v_u32m1x4(uint32_t *base, vuint64m2_t bindex, vuint32m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32m2x4(uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg4ei64_v_u32m2x4(uint32_t *base, vuint64m4_t bindex, vuint32m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u64m1x4(uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg4ei64_v_u64m1x4(uint64_t *base, vuint64m1_t bindex, vuint64m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u64m2x4(uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg4ei64_v_u64m2x4(uint64_t *base, vuint64m2_t bindex, vuint64m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg4ei64_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg4ei64_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg4ei64_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg4ei64_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg4ei64_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32mf2x4_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg4ei64_v_f32mf2x4_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32m1x4_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg4ei64_v_f32m1x4_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32m2x4_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg4ei64_v_f32m2x4_m(vbool16_t mask, float *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f64m1x4_m(vbool64_t mask, double *base, 
vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg4ei64_v_f64m1x4_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f64m2x4_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg4ei64_v_f64m2x4_m(vbool32_t mask, double *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg4ei64_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg4ei64_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg4ei64_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg4ei64_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg4ei64_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg4ei64_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg4ei64_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg4ei64_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg4ei64_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg4ei64_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m2x4_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg4ei64_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg4ei64_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg4ei64_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) 
{ @@ -590,7 +590,7 @@ void test_vsoxseg4ei64_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg4ei64_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg4ei64_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg4ei64_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg4ei64_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg4ei64_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg4ei64_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg4ei64_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg4ei64_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg4ei64_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg4ei64_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg4ei64_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei8.c index df9497d2d3a78..436cc39d0ab4f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16mf4x4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg4ei8_v_f16mf4x4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16mf2x4(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg4ei8_v_f16mf2x4(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16m1x4(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ 
-40,7 +40,7 @@ void test_vsoxseg4ei8_v_f16m1x4(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16m2x4(_Float16 *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg4ei8_v_f16m2x4(_Float16 *base, vuint8m1_t bindex, vfloat16m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32mf2x4(float *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg4ei8_v_f32mf2x4(float *base, vuint8mf8_t bindex, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32m1x4(float *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg4ei8_v_f32m1x4(float *base, vuint8mf4_t bindex, vfloat32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32m2x4(float *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg4ei8_v_f32m2x4(float *base, vuint8mf2_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg4ei8_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f64m1x4(double *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg4ei8_v_f64m1x4(double *base, vuint8mf8_t bindex, vfloat64m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f64m2x4(double *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg4ei8_v_f64m2x4(double *base, vuint8mf4_t bindex, vfloat64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf8x4(int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg4ei8_v_i8mf8x4(int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf4x4(int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg4ei8_v_i8mf4x4(int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) 
[[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf2x4(int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg4ei8_v_i8mf2x4(int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8m1x4(int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg4ei8_v_i8m1x4(int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8m2x4(int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg4ei8_v_i8m2x4(int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16mf4x4(int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg4ei8_v_i16mf4x4(int16_t *base, vuint8mf8_t bindex, vint16mf4x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16mf2x4(int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg4ei8_v_i16mf2x4(int16_t *base, vuint8mf4_t bindex, vint16mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16m1x4(int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg4ei8_v_i16m1x4(int16_t *base, vuint8mf2_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16m2x4(int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg4ei8_v_i16m2x4(int16_t *base, vuint8m1_t bindex, vint16m2x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i32mf2x4(int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg4ei8_v_i32mf2x4(int32_t *base, vuint8mf8_t bindex, vint32mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i32m1x4(int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg4ei8_v_i32m1x4(int32_t *base, vuint8mf4_t bindex, vint32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i32m2x4(int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg4ei8_v_i32m2x4(int32_t *base, vuint8mf2_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i64m1x4(int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg4ei8_v_i64m1x4(int64_t *base, vuint8mf8_t bindex, vint64m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i64m2x4(int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg4ei8_v_i64m2x4(int64_t *base, vuint8mf4_t bindex, vint64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf8x4(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg4ei8_v_u8mf8x4(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf4x4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg4ei8_v_u8mf4x4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf2x4(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg4ei8_v_u8mf2x4(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8m1x4(uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg4ei8_v_u8m1x4(uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8m2x4(uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg4ei8_v_u8m2x4(uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16mf4x4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg4ei8_v_u16mf4x4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16mf2x4(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg4ei8_v_u16mf2x4(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16m1x4(uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg4ei8_v_u16m1x4(uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16m2x4(uint16_t *base, vuint8m1_t bindex, 
vuint16m2x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg4ei8_v_u16m2x4(uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32mf2x4(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg4ei8_v_u32mf2x4(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32m1x4(uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg4ei8_v_u32m1x4(uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32m2x4(uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg4ei8_v_u32m2x4(uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u64m1x4(uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg4ei8_v_u64m1x4(uint64_t *base, vuint8mf8_t bindex, 
vuint64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u64m2x4(uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg4ei8_v_u64m2x4(uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg4ei8_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg4ei8_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, 
vfloat16m1x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg4ei8_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg4ei8_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32mf2x4_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg4ei8_v_f32mf2x4_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32m1x4_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg4ei8_v_f32m1x4_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 
4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32m2x4_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg4ei8_v_f32m2x4_m(vbool16_t mask, float *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f64m1x4_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg4ei8_v_f64m1x4_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f64m2x4_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg4ei8_v_f64m2x4_m(vbool32_t mask, double *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg4ei8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg4ei8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg4ei8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg4ei8_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg4ei8_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsoxseg4ei8_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsoxseg4ei8_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsoxseg4ei8_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsoxseg4ei8_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg4ei8_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsoxseg4ei8_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsoxseg4ei8_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsoxseg4ei8_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint8mf8_t 
bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsoxseg4ei8_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsoxseg4ei8_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsoxseg4ei8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsoxseg4ei8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", 
, 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsoxseg4ei8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsoxseg4ei8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsoxseg4ei8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsoxseg4ei8_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsoxseg4ei8_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsoxseg4ei8_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsoxseg4ei8_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsoxseg4ei8_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsoxseg4ei8_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsoxseg4ei8_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsoxseg4ei8_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei16.c index 0daa4d2893698..bf98cdf799ba4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f16mf4x5(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg5ei16_v_f16mf4x5(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f16mf2x5(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg5ei16_v_f16mf2x5(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f16m1x5(_Float16 *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg5ei16_v_f16m1x5(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], 
i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f32mf2x5(float *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg5ei16_v_f32mf2x5(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f32m1x5(float *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg5ei16_v_f32m1x5(float *base, vuint16mf2_t bindex, vfloat32m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f64m1x5(double *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg5ei16_v_f64m1x5(double *base, vuint16mf4_t bindex, vfloat64m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf8x5(int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg5ei16_v_i8mf8x5(int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf4x5(int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t 
v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg5ei16_v_i8mf4x5(int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf2x5(int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg5ei16_v_i8mf2x5(int8_t *base, vuint16m1_t bindex, vint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8m1x5(int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg5ei16_v_i8m1x5(int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16mf4x5(int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg5ei16_v_i16mf4x5(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16mf2x5(int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg5ei16_v_i16mf2x5(int16_t *base, vuint16mf2_t bindex, vint16mf2x 
// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16m1x5(int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg5ei16_v_i16m1x5(int16_t *base, vuint16m1_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i32mf2x5(int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg5ei16_v_i32mf2x5(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i32m1x5(int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg5ei16_v_i32m1x5(int32_t *base, vuint16mf2_t bindex, vint32m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i64m1x5(int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg5ei16_v_i64m1x5(int64_t *base, vuint16mf4_t bindex, vint64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf8x5(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg5ei16_v_u8mf8x5(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf4x5(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg5ei16_v_u8mf4x5(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf2x5(uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg5ei16_v_u8mf2x5(uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8m1x5(uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg5ei16_v_u8m1x5(uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u16mf4x5(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg5ei16_v_u16mf4x5(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u16mf2x5(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg5ei16_v_u16mf2x5(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u16m1x5(uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg5ei16_v_u16m1x5(uint16_t *base, vuint16m1_t bindex, vuint16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u32mf2x5(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg5ei16_v_u32mf2x5(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u32m1x5(uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg5ei16_v_u32m1x5(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u64m1x5(uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg5ei16_v_u64m1x5(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg5ei16_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg5ei16_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg5ei16_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f32mf2x5_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg5ei16_v_f32mf2x5_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f32m1x5_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg5ei16_v_f32m1x5_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f64m1x5_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg5ei16_v_f64m1x5_m(vbool64_t mask, double *base, vuint16mf4_t bi // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg5ei16_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg5ei16_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg5ei16_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg5ei16_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg5ei16_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg5ei16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg5ei16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg5ei16_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg5ei16_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg5ei16_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg5ei16_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg5ei16_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg5ei16_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg5ei16_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg5ei16_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg5ei16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf2x5_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg5ei16_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg5ei16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg5ei16_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, 
size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg5ei16_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei32.c index 4f06f94a4e6d5..c3c2ab27fb3a8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16mf4x5(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg5ei32_v_f16mf4x5(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16mf2x5(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg5ei32_v_f16mf2x5(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16m1x5(_Float16 *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg5ei32_v_f16m1x5(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f32mf2x5(float *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg5ei32_v_f32mf2x5(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f32m1x5(float *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg5ei32_v_f32m1x5(float *base, vuint32m1_t bindex, vfloat32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f64m1x5(double *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg5ei32_v_f64m1x5(double *base, vuint32mf2_t bindex, vfloat64m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf8x5(int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg5ei32_v_i8mf8x5(int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf4x5(int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg5ei32_v_i8mf4x5(int8_t *base, vuint32m1_t bindex, vint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf2x5(int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg5ei32_v_i8mf2x5(int8_t *base, vuint32m2_t bindex, vint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8m1x5(int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg5ei32_v_i8m1x5(int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16mf4x5(int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg5ei32_v_i16mf4x5(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16mf2x5(int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg5ei32_v_i16mf2x5(int16_t *base, vuint32m1_t bindex, vint16mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16m1x5(int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg5ei32_v_i16m1x5(int16_t *base, vuint32m2_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i32mf2x5(int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg5ei32_v_i32mf2x5(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i32m1x5(int32_t *base, 
vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg5ei32_v_i32m1x5(int32_t *base, vuint32m1_t bindex, vint32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i64m1x5(int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg5ei32_v_i64m1x5(int64_t *base, vuint32mf2_t bindex, vint64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf8x5(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg5ei32_v_u8mf8x5(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf4x5(uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg5ei32_v_u8mf4x5(uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf2x5(uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg5ei32_v_u8mf2x5(uint8_t 
*base, vuint32m2_t bindex, vuint8mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8m1x5(uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg5ei32_v_u8m1x5(uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16mf4x5(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg5ei32_v_u16mf4x5(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16mf2x5(uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg5ei32_v_u16mf2x5(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16m1x5(uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg5ei32_v_u16m1x5(uint16_t *base, vuint32m2_t bindex, vuint16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32mf2x5 
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u32mf2x5(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg5ei32_v_u32mf2x5(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u32m1x5(uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg5ei32_v_u32m1x5(uint32_t *base, vuint32m1_t bindex, vuint32m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u64m1x5(uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg5ei32_v_u64m1x5(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg5ei32_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf2x5_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg5ei32_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg5ei32_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f32mf2x5_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg5ei32_v_f32mf2x5_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f32m1x5_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, 
size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg5ei32_v_f32m1x5_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f64m1x5_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg5ei32_v_f64m1x5_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg5ei32_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg5ei32_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg5ei32_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg5ei32_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg5ei32_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg5ei32_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg5ei32_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg5ei32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg5ei32_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg5ei32_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg5ei32_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg5ei32_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg5ei32_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg5ei32_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg5ei32_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg5ei32_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg5ei32_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg5ei32_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg5ei32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg5ei32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg5ei32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei64.c index 4de616b8ad37c..4476e538e80be 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f16mf4x5(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg5ei64_v_f16mf4x5(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f16mf2x5(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg5ei64_v_f16mf2x5(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f16m1x5(_Float16 *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg5ei64_v_f16m1x5(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f32mf2x5(float *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg5ei64_v_f32mf2x5(float *base, vuint64m1_t bindex, vfloat32mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f32m1x5(float *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg5ei64_v_f32m1x5(float *base, vuint64m2_t bindex, vfloat32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f64m1x5(double *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg5ei64_v_f64m1x5(double *base, vuint64m1_t bindex, vfloat64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf8x5(int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg5ei64_v_i8mf8x5(int8_t *base, vuint64m1_t bindex, vint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf4x5(int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg5ei64_v_i8mf4x5(int8_t *base, vuint64m2_t bindex, vint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf2x5(int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg5ei64_v_i8mf2x5(int8_t *base, vuint64m4_t bindex, vint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8m1x5(int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg5ei64_v_i8m1x5(int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i16mf4x5(int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg5ei64_v_i16mf4x5(int16_t *base, vuint64m1_t bindex, vint16mf4x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i16mf2x5(int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg5ei64_v_i16mf2x5(int16_t *base, vuint64m2_t bindex, vint16mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i16m1x5(int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg5ei64_v_i16m1x5(int16_t *base, vuint64m4_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i32mf2x5(int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg5ei64_v_i32mf2x5(int32_t *base, vuint64m1_t bindex, vint32mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i32m1x5(int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg5ei64_v_i32m1x5(int32_t *base, vuint64m2_t bindex, vint32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i64m1x5(int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg5ei64_v_i64m1x5(int64_t *base, vuint64m1_t bindex, vint64m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf8x5(uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg5ei64_v_u8mf8x5(uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf4x5(uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg5ei64_v_u8mf4x5(uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf2x5(uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg5ei64_v_u8mf2x5(uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8m1x5(uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg5ei64_v_u8m1x5(uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16mf4x5(uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg5ei64_v_u16mf4x5(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16mf2x5(uint16_t 
*base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg5ei64_v_u16mf2x5(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16m1x5(uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg5ei64_v_u16m1x5(uint16_t *base, vuint64m4_t bindex, vuint16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u32mf2x5(uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg5ei64_v_u32mf2x5(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u32m1x5(uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg5ei64_v_u32m1x5(uint32_t *base, vuint64m2_t bindex, vuint32m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u64m1x5(uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void 
test_vsoxseg5ei64_v_u64m1x5(uint64_t *base, vuint64m1_t bindex, vuint64m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg5ei64_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg5ei64_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg5ei64_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f32mf2x5_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg5ei64_v_f32mf2x5_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f32m1x5_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg5ei64_v_f32m1x5_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f64m1x5_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg5ei64_v_f64m1x5_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg5ei64_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg5ei64_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg5ei64_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg5ei64_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg5ei64_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg5ei64_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg5ei64_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg5ei64_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg5ei64_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg5ei64_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg5ei64_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg5ei64_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg5ei64_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, 
vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg5ei64_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg5ei64_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg5ei64_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg5ei64_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg5ei64_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg5ei64_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg5ei64_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei8.c index 291745f767f44..f33b8d18f4e16 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei8.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f16mf4x5(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg5ei8_v_f16mf4x5(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f16mf2x5(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg5ei8_v_f16mf2x5(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f16m1x5(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg5ei8_v_f16m1x5(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f32mf2x5(float *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg5ei8_v_f32mf2x5(float *base, vuint8mf8_t bindex, vfloat32mf2x5_ // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f32m1x5(float *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg5ei8_v_f32m1x5(float *base, vuint8mf4_t bindex, vfloat32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f64m1x5(double *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg5ei8_v_f64m1x5(double *base, vuint8mf8_t bindex, vfloat64m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8mf8x5(int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg5ei8_v_i8mf8x5(int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8mf4x5(int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg5ei8_v_i8mf4x5(int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8mf2x5(int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg5ei8_v_i8mf2x5(int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8m1x5(int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg5ei8_v_i8m1x5(int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16mf4x5(int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg5ei8_v_i16mf4x5(int16_t *base, vuint8mf8_t bindex, vint16mf4x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16mf2x5(int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg5ei8_v_i16mf2x5(int16_t *base, vuint8mf4_t bindex, vint16mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16m1x5(int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg5ei8_v_i16m1x5(int16_t *base, vuint8mf2_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i32mf2x5(int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg5ei8_v_i32mf2x5(int32_t *base, vuint8mf8_t bindex, vint32mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i32m1x5(int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg5ei8_v_i32m1x5(int32_t *base, vuint8mf4_t bindex, vint32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i64m1x5(int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg5ei8_v_i64m1x5(int64_t *base, vuint8mf8_t bindex, vint64m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf8x5(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg5ei8_v_u8mf8x5(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf4x5(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg5ei8_v_u8mf4x5(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf2x5(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg5ei8_v_u8mf2x5(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8m1x5(uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg5ei8_v_u8m1x5(uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u16mf4x5(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg5ei8_v_u16mf4x5(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u16mf2x5(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg5ei8_v_u16mf2x5(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u16m1x5(uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg5ei8_v_u16m1x5(uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u32mf2x5(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg5ei8_v_u32mf2x5(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u32m1x5(uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg5ei8_v_u32m1x5(uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u64m1x5(uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg5ei8_v_u64m1x5(uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg5ei8_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg5ei8_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg5ei8_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f32mf2x5_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg5ei8_v_f32mf2x5_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f32m1x5_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg5ei8_v_f32m1x5_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f64m1x5_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg5ei8_v_f64m1x5_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg5ei8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg5ei8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg5ei8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg5ei8_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg5ei8_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg5ei8_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg5ei8_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void 
test_vsoxseg5ei8_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg5ei8_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg5ei8_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg5ei8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 
3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg5ei8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg5ei8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg5ei8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg5ei8_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg5ei8_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg5ei8_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg5ei8_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg5ei8_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei16.c index 7c155a4cdbba1..b6fb381504514 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16mf4x6(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg6ei16_v_f16mf4x6(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16mf2x6(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg6ei16_v_f16mf2x6(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16m1x6(_Float16 *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg6ei16_v_f16m1x6(_Float16 *base, 
vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f32mf2x6(float *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg6ei16_v_f32mf2x6(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f32m1x6(float *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg6ei16_v_f32m1x6(float *base, vuint16mf2_t bindex, vfloat32m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f64m1x6(double *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg6ei16_v_f64m1x6(double *base, vuint16mf4_t bindex, vfloat64m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf8x6(int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg6ei16_v_i8mf8x6(int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf4x6 // CHECK-RV64-SAME: 
(ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf4x6(int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg6ei16_v_i8mf4x6(int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf2x6(int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg6ei16_v_i8mf2x6(int8_t *base, vuint16m1_t bindex, vint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8m1x6(int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg6ei16_v_i8m1x6(int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i16mf4x6(int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg6ei16_v_i16mf4x6(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i16mf2x6(int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg6ei16_v_i16mf2x6(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i16m1x6(int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg6ei16_v_i16m1x6(int16_t *base, vuint16m1_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i32mf2x6(int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg6ei16_v_i32mf2x6(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i32m1x6(int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg6ei16_v_i32m1x6(int32_t *base, vuint16mf2_t bindex, vint32m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i64m1x6(int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg6ei16_v_i64m1x6(int64_t *base, vuint16mf4_t bindex, vint64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf8x6(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg6ei16_v_u8mf8x6(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf4x6(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg6ei16_v_u8mf4x6(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf2x6(uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg6ei16_v_u8mf2x6(uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8m1x6(uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg6ei16_v_u8m1x6(uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16mf4x6(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg6ei16_v_u16mf4x6(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16mf2x6(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg6ei16_v_u16mf2x6(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16m1x6(uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg6ei16_v_u16m1x6(uint16_t *base, vuint16m1_t bindex, vuint16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u32mf2x6(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg6ei16_v_u32mf2x6(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u32m1x6(uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg6ei16_v_u32m1x6(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u64m1x6(uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg6ei16_v_u64m1x6(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg6ei16_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg6ei16_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg6ei16_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f32mf2x6_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg6ei16_v_f32mf2x6_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f32m1x6_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg6ei16_v_f32m1x6_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f64m1x6_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg6ei16_v_f64m1x6_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg6ei16_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg6ei16_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg6ei16_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8m1x6_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg6ei16_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg6ei16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg6ei16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x6_t 
v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg6ei16_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg6ei16_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg6ei16_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg6ei16_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg6ei16_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg6ei16_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg6ei16_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg6ei16_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg6ei16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg6ei16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg6ei16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg6ei16_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) 
[[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg6ei16_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei32.c index 71c7d85c6f121..9da707d008fa1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16mf4x6(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg6ei32_v_f16mf4x6(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // 
CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16mf2x6(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg6ei32_v_f16mf2x6(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16m1x6(_Float16 *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg6ei32_v_f16m1x6(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f32mf2x6(float *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg6ei32_v_f32mf2x6(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f32m1x6(float *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg6ei32_v_f32m1x6(float *base, vuint32m1_t bindex, vfloat32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f64m1x6(double *base, vuint32mf2_t bindex, vfloat64m1x6_t 
v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg6ei32_v_f64m1x6(double *base, vuint32mf2_t bindex, vfloat64m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf8x6(int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg6ei32_v_i8mf8x6(int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf4x6(int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg6ei32_v_i8mf4x6(int8_t *base, vuint32m1_t bindex, vint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf2x6(int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg6ei32_v_i8mf2x6(int8_t *base, vuint32m2_t bindex, vint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8m1x6(int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg6ei32_v_i8m1x6(int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_ // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i16mf4x6(int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg6ei32_v_i16mf4x6(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i16mf2x6(int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg6ei32_v_i16mf2x6(int16_t *base, vuint32m1_t bindex, vint16mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i16m1x6(int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg6ei32_v_i16m1x6(int16_t *base, vuint32m2_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i32mf2x6(int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg6ei32_v_i32mf2x6(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i32m1x6(int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg6ei32_v_i32m1x6(int32_t *base, vuint32m1_t bindex, vint32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i64m1x6(int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg6ei32_v_i64m1x6(int64_t *base, vuint32mf2_t bindex, vint64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf8x6(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg6ei32_v_u8mf8x6(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf4x6(uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg6ei32_v_u8mf4x6(uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf2x6(uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg6ei32_v_u8mf2x6(uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8m1x6(uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg6ei32_v_u8m1x6(uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16mf4x6(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg6ei32_v_u16mf4x6(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16mf2x6(uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg6ei32_v_u16mf2x6(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16m1x6(uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg6ei32_v_u16m1x6(uint16_t *base, vuint32m2_t bindex, vuint16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u32mf2x6(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg6ei32_v_u32mf2x6(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u32m1x6(uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg6ei32_v_u32m1x6(uint32_t *base, vuint32m1_t bindex, vuint32m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u64m1x6(uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg6ei32_v_u64m1x6(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg6ei32_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg6ei32_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg6ei32_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f32mf2x6_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg6ei32_v_f32mf2x6_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32m1x6_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f32m1x6_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg6ei32_v_f32m1x6_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f64m1x6_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg6ei32_v_f64m1x6_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg6ei32_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x6_t 
v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg6ei32_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg6ei32_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg6ei32_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg6ei32_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg6ei32_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg6ei32_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg6ei32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg6ei32_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg6ei32_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg6ei32_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg6ei32_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg6ei32_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg6ei32_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg6ei32_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg6ei32_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg6ei32_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg6ei32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg6ei32_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei64.c index 004a7fbeb435b..0c5e06ddef758 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16mf4x6(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg6ei64_v_f16mf4x6(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16mf2x6(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg6ei64_v_f16mf2x6(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16m1x6(_Float16 *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg6ei64_v_f16m1x6(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f32mf2x6(float *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg6ei64_v_f32mf2x6(float *base, vuint64m1_t bindex, vfloat32mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f32m1x6(float *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg6ei64_v_f32m1x6(float *base, vuint64m2_t bindex, vfloat32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f64m1x6(double *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg6ei64_v_f64m1x6(double *base, vuint64m1_t bindex, vfloat64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf8x6(int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg6ei64_v_i8mf8x6(int8_t *base, vuint64m1_t bindex, vint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf4x6(int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg6ei64_v_i8mf4x6(int8_t *base, vuint64m2_t bindex, vint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf2x6(int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg6ei64_v_i8mf2x6(int8_t *base, vuint64m4_t bindex, vint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8m1x6(int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg6ei64_v_i8m1x6(int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16mf4x6(int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg6ei64_v_i16mf4x6(int16_t *base, vuint64m1_t bindex, vint16mf4x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16mf2x6(int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg6ei64_v_i16mf2x6(int16_t *base, vuint64m2_t bindex, vint16mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16m1x6(int16_t *base, vuint64m4_t 
bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg6ei64_v_i16m1x6(int16_t *base, vuint64m4_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i32mf2x6(int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg6ei64_v_i32mf2x6(int32_t *base, vuint64m1_t bindex, vint32mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i32m1x6(int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg6ei64_v_i32m1x6(int32_t *base, vuint64m2_t bindex, vint32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i64m1x6(int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg6ei64_v_i64m1x6(int64_t *base, vuint64m1_t bindex, vint64m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8mf8x6(uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg6ei64_v_u8mf8x6(uint8_t *base, 
vuint64m1_t bindex, vuint8mf8x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8mf4x6(uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg6ei64_v_u8mf4x6(uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8mf2x6(uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg6ei64_v_u8mf2x6(uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8m1x6(uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg6ei64_v_u8m1x6(uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16mf4x6(uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg6ei64_v_u16mf4x6(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf2x6 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16mf2x6(uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg6ei64_v_u16mf2x6(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16m1x6(uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg6ei64_v_u16m1x6(uint16_t *base, vuint64m4_t bindex, vuint16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u32mf2x6(uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg6ei64_v_u32mf2x6(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u32m1x6(uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg6ei64_v_u32m1x6(uint32_t *base, vuint64m2_t bindex, vuint32m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) 
[[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u64m1x6(uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg6ei64_v_u64m1x6(uint64_t *base, vuint64m1_t bindex, vuint64m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg6ei64_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg6ei64_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg6ei64_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local 
void @test_vsoxseg6ei64_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f32mf2x6_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg6ei64_v_f32mf2x6_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f32m1x6_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg6ei64_v_f32m1x6_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f64m1x6_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg6ei64_v_f64m1x6_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf8x6_m(vbool64_t mask, int8_t 
*base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg6ei64_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg6ei64_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg6ei64_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg6ei64_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg6ei64_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg6ei64_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg6ei64_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg6ei64_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg6ei64_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg6ei64_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg6ei64_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg6ei64_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf2x6_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg6ei64_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg6ei64_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg6ei64_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t 
vl) { @@ -490,7 +490,7 @@ void test_vsoxseg6ei64_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg6ei64_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg6ei64_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg6ei64_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei8.c index 568af717287de..d5224badfbc84 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f16mf4x6(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg6ei8_v_f16mf4x6(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f16mf2x6(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg6ei8_v_f16mf2x6(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f16m1x6(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg6ei8_v_f16m1x6(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f32mf2x6(float *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg6ei8_v_f32mf2x6(float *base, vuint8mf8_t bindex, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f32m1x6(float *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg6ei8_v_f32m1x6(float *base, vuint8mf4_t bindex, vfloat32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f64m1x6(double *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg6ei8_v_f64m1x6(double *base, vuint8mf8_t bindex, vfloat64m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf8x6(int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg6ei8_v_i8mf8x6(int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf4x6(int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg6ei8_v_i8mf4x6(int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf2x6(int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg6ei8_v_i8mf2x6(int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8m1x6(int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg6ei8_v_i8m1x6(int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i16mf4x6(int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg6ei8_v_i16mf4x6(int16_t *base, vuint8mf8_t bindex, vint16mf4x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i16mf2x6(int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg6ei8_v_i16mf2x6(int16_t *base, vuint8mf4_t bindex, vint16mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i16m1x6(int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg6ei8_v_i16m1x6(int16_t *base, vuint8mf2_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i32mf2x6(int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg6ei8_v_i32mf2x6(int32_t *base, vuint8mf8_t bindex, vint32mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i32m1x6(int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg6ei8_v_i32m1x6(int32_t *base, vuint8mf4_t bindex, vint32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i64m1x6(int64_t 
*base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg6ei8_v_i64m1x6(int64_t *base, vuint8mf8_t bindex, vint64m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf8x6(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg6ei8_v_u8mf8x6(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf4x6(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg6ei8_v_u8mf4x6(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf2x6(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg6ei8_v_u8mf2x6(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8m1x6(uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg6ei8_v_u8m1x6(uint8_t *base, vuint8m1_t 
bindex, vuint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u16mf4x6(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg6ei8_v_u16mf4x6(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u16mf2x6(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg6ei8_v_u16mf2x6(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u16m1x6(uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg6ei8_v_u16m1x6(uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u32mf2x6(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg6ei8_v_u32mf2x6(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32m1x6 // CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u32m1x6(uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg6ei8_v_u32m1x6(uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u64m1x6(uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg6ei8_v_u64m1x6(uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg6ei8_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg6ei8_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg6ei8_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg6ei8_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f32mf2x6_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg6ei8_v_f32mf2x6_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f32m1x6_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg6ei8_v_f32m1x6_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f64m1x6_m(vbool64_t mask, double *base, vuint8mf8_t 
bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg6ei8_v_f64m1x6_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg6ei8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg6ei8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg6ei8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg6ei8_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg6ei8_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg6ei8_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg6ei8_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg6ei8_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg6ei8_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg6ei8_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg6ei8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg6ei8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg6ei8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg6ei8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg6ei8_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg6ei8_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg6ei8_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg6ei8_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg6ei8_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u32m1x6_m(vbool32_t mask, uint32_t *base, 
vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg6ei8_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei16.c index 602816ca1691f..857b9afca051f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16mf4x7(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg7ei16_v_f16mf4x7(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16mf2x7(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg7ei16_v_f16mf2x7(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16m1x7(_Float16 *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg7ei16_v_f16m1x7(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f32mf2x7(float *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg7ei16_v_f32mf2x7(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f32m1x7(float *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg7ei16_v_f32m1x7(float *base, vuint16mf2_t bindex, vfloat32m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f64m1x7(double *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg7ei16_v_f64m1x7(double *base, vuint16mf4_t bindex, vfloat64m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf8x7(int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg7ei16_v_i8mf8x7(int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf4x7(int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg7ei16_v_i8mf4x7(int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf2x7(int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg7ei16_v_i8mf2x7(int8_t *base, vuint16m1_t bindex, vint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8m1x7(int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg7ei16_v_i8m1x7(int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i16mf4x7(int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg7ei16_v_i16mf4x7(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i16mf2x7(int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg7ei16_v_i16mf2x7(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i16m1x7(int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg7ei16_v_i16m1x7(int16_t *base, vuint16m1_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i32mf2x7(int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg7ei16_v_i32mf2x7(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i32m1x7(int32_t *base, 
vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg7ei16_v_i32m1x7(int32_t *base, vuint16mf2_t bindex, vint32m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i64m1x7(int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg7ei16_v_i64m1x7(int64_t *base, vuint16mf4_t bindex, vint64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf8x7(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg7ei16_v_u8mf8x7(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf4x7(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg7ei16_v_u8mf4x7(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf2x7(uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg7ei16_v_u8mf2x7(uint8_t 
*base, vuint16m1_t bindex, vuint8mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8m1x7(uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg7ei16_v_u8m1x7(uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16mf4x7(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg7ei16_v_u16mf4x7(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16mf2x7(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg7ei16_v_u16mf2x7(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16m1x7(uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg7ei16_v_u16m1x7(uint16_t *base, vuint16m1_t bindex, vuint16m1x7 // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg7ei16_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u32mf2x7(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg7ei16_v_u32mf2x7(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u32m1x7(uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg7ei16_v_u32m1x7(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u64m1x7(uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg7ei16_v_u64m1x7(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg7ei16_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg7ei16_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg7ei16_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg7ei16_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f32mf2x7_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg7ei16_v_f32mf2x7_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f32m1x7_m(vbool32_t mask, float 
*base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg7ei16_v_f32m1x7_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f64m1x7_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg7ei16_v_f64m1x7_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg7ei16_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg7ei16_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg7ei16_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg7ei16_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg7ei16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg7ei16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg7ei16_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg7ei16_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg7ei16_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg7ei16_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf8x7_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg7ei16_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg7ei16_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg7ei16_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) { 
@@ -470,7 +470,7 @@ void test_vsoxseg7ei16_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg7ei16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg7ei16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg7ei16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg7ei16_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg7ei16_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei32.c index e1f04c0792b08..222464f3ce37e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16mf4x7(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg7ei32_v_f16mf4x7(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16mf2x7(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg7ei32_v_f16mf2x7(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16m1x7(_Float16 *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg7ei32_v_f16m1x7(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f32mf2x7(float *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg7ei32_v_f32mf2x7(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f32m1x7(float *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg7ei32_v_f32m1x7(float *base, vuint32m1_t bindex, vfloat32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f64m1x7(double *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg7ei32_v_f64m1x7(double *base, vuint32mf2_t bindex, vfloat64m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf8x7(int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg7ei32_v_i8mf8x7(int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf4x7(int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg7ei32_v_i8mf4x7(int8_t *base, vuint32m1_t bindex, vint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf2x7(int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg7ei32_v_i8mf2x7(int8_t *base, vuint32m2_t bindex, vint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8m1x7(int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg7ei32_v_i8m1x7(int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16mf4x7(int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg7ei32_v_i16mf4x7(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16mf2x7(int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg7ei32_v_i16mf2x7(int16_t *base, vuint32m1_t bindex, vint16mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16m1x7(int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg7ei32_v_i16m1x7(int16_t *base, vuint32m2_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i32mf2x7(int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg7ei32_v_i32mf2x7(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i32m1x7(int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg7ei32_v_i32m1x7(int32_t *base, vuint32m1_t bindex, vint32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i64m1x7(int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg7ei32_v_i64m1x7(int64_t *base, vuint32mf2_t bindex, vint64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf8x7(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg7ei32_v_u8mf8x7(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf4x7(uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg7ei32_v_u8mf4x7(uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf2x7(uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg7ei32_v_u8mf2x7(uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8m1x7(uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg7ei32_v_u8m1x7(uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16mf4x7(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg7ei32_v_u16mf4x7(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16mf2x7(uint16_t 
*base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg7ei32_v_u16mf2x7(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16m1x7(uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg7ei32_v_u16m1x7(uint16_t *base, vuint32m2_t bindex, vuint16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u32mf2x7(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg7ei32_v_u32mf2x7(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u32m1x7(uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg7ei32_v_u32m1x7(uint32_t *base, vuint32m1_t bindex, vuint32m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u64m1x7(uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void 
test_vsoxseg7ei32_v_u64m1x7(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg7ei32_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg7ei32_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg7ei32_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f32mf2x7_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg7ei32_v_f32mf2x7_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f32m1x7_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg7ei32_v_f32m1x7_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f64m1x7_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg7ei32_v_f64m1x7_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg7ei32_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg7ei32_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg7ei32_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg7ei32_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg7ei32_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg7ei32_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg7ei32_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg7ei32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg7ei32_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg7ei32_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg7ei32_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg7ei32_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg7ei32_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, 
vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg7ei32_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg7ei32_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg7ei32_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg7ei32_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg7ei32_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg7ei32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg7ei32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei64.c index 3c9a3dd78af4c..795974c2ea841 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei64.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16mf4x7(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg7ei64_v_f16mf4x7(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16mf2x7(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg7ei64_v_f16mf2x7(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16m1x7(_Float16 *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg7ei64_v_f16m1x7(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f32mf2x7(float *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg7ei64_v_f32mf2x7(float *base, vuint64m1_t bindex, 
vfloat32mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f32m1x7(float *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg7ei64_v_f32m1x7(float *base, vuint64m2_t bindex, vfloat32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f64m1x7(double *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg7ei64_v_f64m1x7(double *base, vuint64m1_t bindex, vfloat64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8mf8x7(int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg7ei64_v_i8mf8x7(int8_t *base, vuint64m1_t bindex, vint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8mf4x7(int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg7ei64_v_i8mf4x7(int8_t *base, vuint64m2_t bindex, vint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8mf2x7(int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg7ei64_v_i8mf2x7(int8_t *base, vuint64m4_t bindex, vint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8m1x7(int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg7ei64_v_i8m1x7(int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16mf4x7(int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg7ei64_v_i16mf4x7(int16_t *base, vuint64m1_t bindex, vint16mf4x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16mf2x7(int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg7ei64_v_i16mf2x7(int16_t *base, vuint64m2_t bindex, vint16mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16m1x7(int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg7ei64_v_i16m1x7(int16_t *base, vuint64m4_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i32mf2x7(int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg7ei64_v_i32mf2x7(int32_t *base, vuint64m1_t bindex, vint32mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i32m1x7(int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg7ei64_v_i32m1x7(int32_t *base, vuint64m2_t bindex, vint32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i64m1x7(int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg7ei64_v_i64m1x7(int64_t *base, vuint64m1_t bindex, vint64m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8mf8x7(uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg7ei64_v_u8mf8x7(uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8mf4x7(uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg7ei64_v_u8mf4x7(uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8mf2x7(uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg7ei64_v_u8mf2x7(uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8m1x7(uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg7ei64_v_u8m1x7(uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u16mf4x7(uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg7ei64_v_u16mf4x7(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u16mf2x7(uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg7ei64_v_u16mf2x7(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u16m1x7(uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg7ei64_v_u16m1x7(uint16_t *base, vuint64m4_t bindex, vuint16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u32mf2x7(uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg7ei64_v_u32mf2x7(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u32m1x7(uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg7ei64_v_u32m1x7(uint32_t *base, vuint64m2_t bindex, vuint32m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u64m1x7(uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg7ei64_v_u64m1x7(uint64_t *base, vuint64m1_t bindex, vuint64m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg7ei64_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg7ei64_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg7ei64_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f32mf2x7_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg7ei64_v_f32mf2x7_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f32m1x7_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg7ei64_v_f32m1x7_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f64m1x7_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg7ei64_v_f64m1x7_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg7ei64_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg7ei64_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg7ei64_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg7ei64_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: 
define dso_local void @test_vsoxseg7ei64_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg7ei64_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg7ei64_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg7ei64_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg7ei64_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg7ei64_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg7ei64_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg7ei64_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg7ei64_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg7ei64_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg7ei64_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg7ei64_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg7ei64_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg7ei64_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg7ei64_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg7ei64_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg7ei64_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u64m1x7_m // CHECK-RV64-SAME: 
( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei8.c index 5cd3d0f699720..2f1be2a04df51 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f16mf4x7(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg7ei8_v_f16mf4x7(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f16mf2x7(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg7ei8_v_f16mf2x7(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: 
ret void // void test_vsoxseg7ei8_v_f16m1x7(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg7ei8_v_f16m1x7(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f32mf2x7(float *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg7ei8_v_f32mf2x7(float *base, vuint8mf8_t bindex, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f32m1x7(float *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg7ei8_v_f32m1x7(float *base, vuint8mf4_t bindex, vfloat32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f64m1x7(double *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg7ei8_v_f64m1x7(double *base, vuint8mf8_t bindex, vfloat64m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8mf8x7(int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void 
test_vsoxseg7ei8_v_i8mf8x7(int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8mf4x7(int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg7ei8_v_i8mf4x7(int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8mf2x7(int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg7ei8_v_i8mf2x7(int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8m1x7(int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg7ei8_v_i8m1x7(int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i16mf4x7(int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg7ei8_v_i16mf4x7(int16_t *base, vuint8mf8_t bindex, vint16mf4x7_ // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg7ei8_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i16mf2x7(int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg7ei8_v_i16mf2x7(int16_t *base, vuint8mf4_t bindex, vint16mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i16m1x7(int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg7ei8_v_i16m1x7(int16_t *base, vuint8mf2_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i32mf2x7(int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg7ei8_v_i32mf2x7(int32_t *base, vuint8mf8_t bindex, vint32mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i32m1x7(int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg7ei8_v_i32m1x7(int32_t *base, vuint8mf4_t bindex, vint32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) 
[[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i64m1x7(int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg7ei8_v_i64m1x7(int64_t *base, vuint8mf8_t bindex, vint64m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf8x7(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg7ei8_v_u8mf8x7(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf4x7(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg7ei8_v_u8mf4x7(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf2x7(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg7ei8_v_u8mf2x7(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8m1x7(uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg7ei8_v_u8m1x7(uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16mf4x7(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg7ei8_v_u16mf4x7(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16mf2x7(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg7ei8_v_u16mf2x7(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16m1x7(uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg7ei8_v_u16m1x7(uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u32mf2x7(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg7ei8_v_u32mf2x7(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u32m1x7(uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg7ei8_v_u32m1x7(uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u64m1x7(uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg7ei8_v_u64m1x7(uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg7ei8_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg7ei8_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg7ei8_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f32mf2x7_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg7ei8_v_f32mf2x7_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f32m1x7_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg7ei8_v_f32m1x7_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f64m1x7_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg7ei8_v_f64m1x7_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg7ei8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg7ei8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg7ei8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg7ei8_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg7ei8_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg7ei8_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg7ei8_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, 
vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg7ei8_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg7ei8_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg7ei8_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg7ei8_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg7ei8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg7ei8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg7ei8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg7ei8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg7ei8_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg7ei8_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg7ei8_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg7ei8_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg7ei8_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei16.c index 69574cd00d7b7..f5a6292f24112 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16mf4x8(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg8ei16_v_f16mf4x8(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16mf2x8(_Float16 
*base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg8ei16_v_f16mf2x8(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16m1x8(_Float16 *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg8ei16_v_f16m1x8(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f32mf2x8(float *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg8ei16_v_f32mf2x8(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f32m1x8(float *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg8ei16_v_f32m1x8(float *base, vuint16mf2_t bindex, vfloat32m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f64m1x8(double *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void 
test_vsoxseg8ei16_v_f64m1x8(double *base, vuint16mf4_t bindex, vfloat64m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf8x8(int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg8ei16_v_i8mf8x8(int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf4x8(int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg8ei16_v_i8mf4x8(int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf2x8(int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg8ei16_v_i8mf2x8(int8_t *base, vuint16m1_t bindex, vint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8m1x8(int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg8ei16_v_i8m1x8(int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg8ei16_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16mf4x8(int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg8ei16_v_i16mf4x8(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16mf2x8(int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg8ei16_v_i16mf2x8(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16m1x8(int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg8ei16_v_i16m1x8(int16_t *base, vuint16m1_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i32mf2x8(int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg8ei16_v_i32mf2x8(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i32m1x8(int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg8ei16_v_i32m1x8(int32_t *base, vuint16mf2_t bindex, vint32m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i64m1x8(int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg8ei16_v_i64m1x8(int64_t *base, vuint16mf4_t bindex, vint64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf8x8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg8ei16_v_u8mf8x8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf4x8(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg8ei16_v_u8mf4x8(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf2x8(uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg8ei16_v_u8mf2x8(uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8m1x8(uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg8ei16_v_u8m1x8(uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16mf4x8(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg8ei16_v_u16mf4x8(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16mf2x8(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg8ei16_v_u16mf2x8(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16m1x8(uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg8ei16_v_u16m1x8(uint16_t *base, vuint16m1_t bindex, vuint16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u32mf2x8(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg8ei16_v_u32mf2x8(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u32m1x8(uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg8ei16_v_u32m1x8(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u64m1x8(uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg8ei16_v_u64m1x8(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg8ei16_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg8ei16_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg8ei16_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f32mf2x8_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg8ei16_v_f32mf2x8_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32m1x8_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f32m1x8_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg8ei16_v_f32m1x8_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f64m1x8_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg8ei16_v_f64m1x8_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg8ei16_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t 
v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg8ei16_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg8ei16_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg8ei16_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg8ei16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg8ei16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg8ei16_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg8ei16_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg8ei16_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg8ei16_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg8ei16_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg8ei16_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg8ei16_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg8ei16_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg8ei16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg8ei16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg8ei16_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint16m1_t b 
// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg8ei16_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg8ei16_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei32.c index fcf212a759ddd..36e3a232629ab 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 
8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f16mf4x8(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg8ei32_v_f16mf4x8(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f16mf2x8(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg8ei32_v_f16mf2x8(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f16m1x8(_Float16 *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg8ei32_v_f16m1x8(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f32mf2x8(float *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg8ei32_v_f32mf2x8(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f32m1x8(float *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg8ei32_v_f32m1x8(float *base, vuint32m1_t bindex, vfloat32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f64m1x8(double *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg8ei32_v_f64m1x8(double *base, vuint32mf2_t bindex, vfloat64m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8mf8x8(int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg8ei32_v_i8mf8x8(int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8mf4x8(int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg8ei32_v_i8mf4x8(int8_t *base, vuint32m1_t bindex, vint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8mf2x8(int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg8ei32_v_i8mf2x8(int8_t *base, vuint32m2_t bindex, vint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8m1x8(int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg8ei32_v_i8m1x8(int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16mf4x8(int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg8ei32_v_i16mf4x8(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16mf2x8(int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg8ei32_v_i16mf2x8(int16_t *base, vuint32m1_t bindex, vint16mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16m1x8(int16_t *base, 
vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg8ei32_v_i16m1x8(int16_t *base, vuint32m2_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i32mf2x8(int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg8ei32_v_i32mf2x8(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i32m1x8(int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg8ei32_v_i32m1x8(int32_t *base, vuint32m1_t bindex, vint32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i64m1x8(int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg8ei32_v_i64m1x8(int64_t *base, vuint32mf2_t bindex, vint64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf8x8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg8ei32_v_u8mf8x8(uint8_t 
*base, vuint32mf2_t bindex, vuint8mf8x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf4x8(uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg8ei32_v_u8mf4x8(uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf2x8(uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg8ei32_v_u8mf2x8(uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8m1x8(uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg8ei32_v_u8m1x8(uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16mf4x8(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg8ei32_v_u16mf4x8(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf2x8 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16mf2x8(uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg8ei32_v_u16mf2x8(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16m1x8(uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg8ei32_v_u16m1x8(uint16_t *base, vuint32m2_t bindex, vuint16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u32mf2x8(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg8ei32_v_u32mf2x8(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u32m1x8(uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg8ei32_v_u32m1x8(uint32_t *base, vuint32m1_t bindex, vuint32m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) 
[[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u64m1x8(uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg8ei32_v_u64m1x8(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg8ei32_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg8ei32_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg8ei32_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg8ei32_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f32mf2x8_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg8ei32_v_f32mf2x8_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f32m1x8_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg8ei32_v_f32m1x8_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f64m1x8_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg8ei32_v_f64m1x8_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8mf8x8_m(vbool64_t mask, 
int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg8ei32_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg8ei32_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg8ei32_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg8ei32_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg8ei32_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg8ei32_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg8ei32_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg8ei32_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg8ei32_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg8ei32_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg8ei32_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg8ei32_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf2x8_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg8ei32_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg8ei32_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg8ei32_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t 
vl) { @@ -490,7 +490,7 @@ void test_vsoxseg8ei32_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg8ei32_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg8ei32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg8ei32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei64.c index 15a824dbbfac7..d821dbc0e7e52 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16mf4x8(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg8ei64_v_f16mf4x8(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16mf2x8(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg8ei64_v_f16mf2x8(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16m1x8(_Float16 *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg8ei64_v_f16m1x8(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f32mf2x8(float *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg8ei64_v_f32mf2x8(float *base, vuint64m1_t bindex, vfloat32mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f32m1x8(float *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg8ei64_v_f32m1x8(float *base, vuint64m2_t bindex, vfloat32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f64m1x8(double *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg8ei64_v_f64m1x8(double *base, vuint64m1_t bindex, vfloat64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf8x8(int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg8ei64_v_i8mf8x8(int8_t *base, vuint64m1_t bindex, vint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf4x8(int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg8ei64_v_i8mf4x8(int8_t *base, vuint64m2_t bindex, vint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf2x8(int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg8ei64_v_i8mf2x8(int8_t *base, vuint64m4_t bindex, vint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8m1x8(int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg8ei64_v_i8m1x8(int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16mf4x8(int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg8ei64_v_i16mf4x8(int16_t *base, vuint64m1_t bindex, vint16mf4x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16mf2x8(int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg8ei64_v_i16mf2x8(int16_t *base, vuint64m2_t bindex, vint16mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16m1x8(int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg8ei64_v_i16m1x8(int16_t *base, vuint64m4_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i32mf2x8(int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg8ei64_v_i32mf2x8(int32_t *base, vuint64m1_t bindex, vint32mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i32m1x8(int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg8ei64_v_i32m1x8(int32_t *base, vuint64m2_t bindex, vint32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i64m1x8(int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg8ei64_v_i64m1x8(int64_t *base, vuint64m1_t bindex, vint64m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf8x8(uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg8ei64_v_u8mf8x8(uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf4x8(uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg8ei64_v_u8mf4x8(uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf2x8(uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg8ei64_v_u8mf2x8(uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8m1x8(uint8_t *base, 
vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg8ei64_v_u8m1x8(uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16mf4x8(uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg8ei64_v_u16mf4x8(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16mf2x8(uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg8ei64_v_u16mf2x8(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16m1x8(uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg8ei64_v_u16m1x8(uint16_t *base, vuint64m4_t bindex, vuint16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u32mf2x8(uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void 
test_vsoxseg8ei64_v_u32mf2x8(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u32m1x8(uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg8ei64_v_u32m1x8(uint32_t *base, vuint64m2_t bindex, vuint32m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u64m1x8(uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg8ei64_v_u64m1x8(uint64_t *base, vuint64m1_t bindex, vuint64m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg8ei64_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, 
vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg8ei64_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg8ei64_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f32mf2x8_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg8ei64_v_f32mf2x8_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f32m1x8_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg8ei64_v_f32m1x8_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f64m1x8_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg8ei64_v_f64m1x8_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg8ei64_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg8ei64_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg8ei64_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg8ei64_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg8ei64_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg8ei64_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg8ei64_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32mf2x8_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg8ei64_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg8ei64_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsoxseg8ei64_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { 
@@ -440,7 +440,7 @@ void test_vsoxseg8ei64_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg8ei64_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg8ei64_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg8ei64_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg8ei64_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg8ei64_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg8ei64_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg8ei64_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg8ei64_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei8.c index 7bcd658af4694..877523fcb752e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f16mf4x8(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsoxseg8ei8_v_f16mf4x8(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f16mf2x8(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsoxseg8ei8_v_f16mf2x8(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local 
void @test_vsoxseg8ei8_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f16m1x8(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsoxseg8ei8_v_f16m1x8(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f32mf2x8(float *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsoxseg8ei8_v_f32mf2x8(float *base, vuint8mf8_t bindex, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f32m1x8(float *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsoxseg8ei8_v_f32m1x8(float *base, vuint8mf4_t bindex, vfloat32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f64m1x8(double *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsoxseg8ei8_v_f64m1x8(double *base, vuint8mf8_t bindex, vfloat64m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) 
[[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8mf8x8(int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsoxseg8ei8_v_i8mf8x8(int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8mf4x8(int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsoxseg8ei8_v_i8mf4x8(int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8mf2x8(int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsoxseg8ei8_v_i8mf2x8(int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8m1x8(int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsoxseg8ei8_v_i8m1x8(int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16mf4x8(int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsoxseg8ei8_v_i16mf4x8(int16_t *base, vuint8mf8_t bindex, vint16mf4x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16mf2x8(int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsoxseg8ei8_v_i16mf2x8(int16_t *base, vuint8mf4_t bindex, vint16mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16m1x8(int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsoxseg8ei8_v_i16m1x8(int16_t *base, vuint8mf2_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i32mf2x8(int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsoxseg8ei8_v_i32mf2x8(int32_t *base, vuint8mf8_t bindex, vint32mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i32m1x8(int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsoxseg8ei8_v_i32m1x8(int32_t *base, vuint8mf4_t bindex, vint32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i64m1x8(int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsoxseg8ei8_v_i64m1x8(int64_t *base, vuint8mf8_t bindex, vint64m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf8x8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsoxseg8ei8_v_u8mf8x8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf4x8(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsoxseg8ei8_v_u8mf4x8(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf2x8(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsoxseg8ei8_v_u8mf2x8(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8m1x8(uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsoxseg8ei8_v_u8m1x8(uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u16mf4x8(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsoxseg8ei8_v_u16mf4x8(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u16mf2x8(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsoxseg8ei8_v_u16mf2x8(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u16m1x8(uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsoxseg8ei8_v_u16m1x8(uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u32mf2x8(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsoxseg8ei8_v_u32mf2x8(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u32m1x8(uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsoxseg8ei8_v_u32m1x8(uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u64m1x8(uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsoxseg8ei8_v_u64m1x8(uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // 
void test_vsoxseg8ei8_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsoxseg8ei8_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsoxseg8ei8_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsoxseg8ei8_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f32mf2x8_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsoxseg8ei8_v_f32mf2x8_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f32m1x8_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsoxseg8ei8_v_f32m1x8_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f64m1x8_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsoxseg8ei8_v_f64m1x8_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsoxseg8ei8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsoxseg8ei8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsoxseg8ei8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsoxseg8ei8_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsoxseg8ei8_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsoxseg8ei8_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsoxseg8ei8_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsoxseg8ei8_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsoxseg8ei8_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void 
test_vsoxseg8ei8_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsoxseg8ei8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsoxseg8ei8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsoxseg8ei8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 
3) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsoxseg8ei8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsoxseg8ei8_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsoxseg8ei8_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsoxseg8ei8_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsoxseg8ei8_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsoxseg8ei8_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei16.c index a0297b1d816a4..44db95833e3f4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16mf4x2(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg2ei16_v_f16mf4x2(_Float16 *base, vuint16mf4_t bindex, vfloat16m // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16mf2x2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg2ei16_v_f16mf2x2(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m1x2(_Float16 *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg2ei16_v_f16m1x2(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m2x2(_Float16 *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg2ei16_v_f16m2x2(_Float16 *base, vuint16m2_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m4x2(_Float16 *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg2ei16_v_f16m4x2(_Float16 *base, vuint16m4_t bindex, vfloat16m4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32mf2x2(float *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg2ei16_v_f32mf2x2(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m1x2(float *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg2ei16_v_f32m1x2(float *base, vuint16mf2_t bindex, vfloat32m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m2x2(float *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg2ei16_v_f32m2x2(float *base, vuint16m1_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m4x2(float *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg2ei16_v_f32m4x2(float *base, vuint16m2_t bindex, vfloat32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m1x2(double *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg2ei16_v_f64m1x2(double *base, vuint16mf4_t bindex, vfloat64m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m2x2(double *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg2ei16_v_f64m2x2(double *base, vuint16mf2_t bindex, vfloat64m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m4x2(double *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg2ei16_v_f64m4x2(double *base, vuint16m1_t bindex, vfloat64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8mf8x2(int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg2ei16_v_i8mf8x2(int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8mf4x2(int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg2ei16_v_i8mf4x2(int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8mf2x2(int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg2ei16_v_i8mf2x2(int8_t *base, vuint16m1_t bindex, vint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m1x2(int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg2ei16_v_i8m1x2(int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m2x2(int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg2ei16_v_i8m2x2(int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m4x2(int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg2ei16_v_i8m4x2(int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16mf4x2(int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg2ei16_v_i16mf4x2(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16mf2x2(int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg2ei16_v_i16mf2x2(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m1x2(int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg2ei16_v_i16m1x2(int16_t *base, vuint16m1_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m2x2(int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg2ei16_v_i16m2x2(int16_t *base, vuint16m2_t bindex, vint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m4x2(int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg2ei16_v_i16m4x2(int16_t *base, vuint16m4_t bindex, vint16m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32mf2x2(int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg2ei16_v_i32mf2x2(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32m1x2(int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg2ei16_v_i32m1x2(int32_t *base, vuint16mf2_t bindex, vint32m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32m2x2(int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg2ei16_v_i32m2x2(int32_t *base, vuint16m1_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32m4x2(int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg2ei16_v_i32m4x2(int32_t *base, vuint16m2_t bindex, vint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i64m1x2(int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg2ei16_v_i64m1x2(int64_t *base, vuint16mf4_t bindex, vint64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i64m2x2(int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg2ei16_v_i64m2x2(int64_t *base, vuint16mf2_t bindex, vint64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg2ei16_v_i64m4x2(int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg2ei16_v_i64m4x2(int64_t *base, vuint16m1_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf8x2(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg2ei16_v_u8mf8x2(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf4x2(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg2ei16_v_u8mf4x2(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf2x2(uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg2ei16_v_u8mf2x2(uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m1x2(uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ 
void test_vsuxseg2ei16_v_u8m1x2(uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m2x2(uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg2ei16_v_u8m2x2(uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m4x2(uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg2ei16_v_u8m4x2(uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16mf4x2(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg2ei16_v_u16mf4x2(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16mf2x2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg2ei16_v_u16mf2x2(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg2ei16_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m1x2(uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg2ei16_v_u16m1x2(uint16_t *base, vuint16m1_t bindex, vuint16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m2x2(uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg2ei16_v_u16m2x2(uint16_t *base, vuint16m2_t bindex, vuint16m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m4x2(uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg2ei16_v_u16m4x2(uint16_t *base, vuint16m4_t bindex, vuint16m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32mf2x2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg2ei16_v_u32mf2x2(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m1x2(uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg2ei16_v_u32m1x2(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m2x2(uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg2ei16_v_u32m2x2(uint32_t *base, vuint16m1_t bindex, vuint32m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m4x2(uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg2ei16_v_u32m4x2(uint32_t *base, vuint16m2_t bindex, vuint32m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u64m1x2(uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg2ei16_v_u64m1x2(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u64m2x2(uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg2ei16_v_u64m2x2(uint64_t *base, vuint16mf2_t bindex, vuint64m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u64m4x2(uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg2ei16_v_u64m4x2(uint64_t *base, vuint16m1_t bindex, vuint64m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg2ei16_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg2ei16_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg2ei16_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg2ei16_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg2ei16_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint16m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32mf2x2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void 
test_vsuxseg2ei16_v_f32mf2x2_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m1x2_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg2ei16_v_f32m1x2_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m2x2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg2ei16_v_f32m2x2_m(vbool16_t mask, float *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m4x2_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg2ei16_v_f32m4x2_m(vbool8_t mask, float *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m1x2_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg2ei16_v_f64m1x2_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m2x2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg2ei16_v_f64m2x2_m(vbool32_t mask, double *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m4x2_m(vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg2ei16_v_f64m4x2_m(vbool16_t mask, double *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg2ei16_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg2ei16_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg2ei16_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg2ei16_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg2ei16_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint16m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg2ei16_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint16m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg2ei16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg2ei16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg2ei16_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg2ei16_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg2ei16_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg2ei16_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint16m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg2ei16_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32m1x2_m(vbool32_t mask, int32_t 
*base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg2ei16_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsuxseg2ei16_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsuxseg2ei16_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsuxseg2ei16_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsuxseg2ei16_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsuxseg2ei16_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsuxseg2ei16_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsuxseg2ei16_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsuxseg2ei16_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -830,7 +830,7 @@ void test_vsuxseg2ei16_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -840,7 +840,7 @@ void test_vsuxseg2ei16_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -850,7 +850,7 @@ void test_vsuxseg2ei16_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint16m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf4x2_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -860,7 +860,7 @@ void test_vsuxseg2ei16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -870,7 +870,7 @@ void test_vsuxseg2ei16_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -880,7 +880,7 @@ void test_vsuxseg2ei16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, 
size_t vl) { @@ -890,7 +890,7 @@ void test_vsuxseg2ei16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -900,7 +900,7 @@ void test_vsuxseg2ei16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint16m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -910,7 +910,7 @@ void test_vsuxseg2ei16_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -920,7 +920,7 @@ void test_vsuxseg2ei16_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 
2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -930,7 +930,7 @@ void test_vsuxseg2ei16_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -940,7 +940,7 @@ void test_vsuxseg2ei16_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -950,7 +950,7 @@ void test_vsuxseg2ei16_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -960,7 +960,7 @@ void test_vsuxseg2ei16_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei32.c index c23d7ea75dbec..d99295446519c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16mf4x2(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg2ei32_v_f16mf4x2(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16mf2x2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg2ei32_v_f16mf2x2(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16m1x2(_Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg2ei32_v_f16m1x2(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m2x2 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16m2x2(_Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg2ei32_v_f16m2x2(_Float16 *base, vuint32m4_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16m4x2(_Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg2ei32_v_f16m4x2(_Float16 *base, vuint32m8_t bindex, vfloat16m4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32mf2x2(float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg2ei32_v_f32mf2x2(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m1x2(float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg2ei32_v_f32m1x2(float *base, vuint32m1_t bindex, vfloat32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m2x2(float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg2ei32_v_f32m2x2(float *base, vuint32m2_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m4x2(float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg2ei32_v_f32m4x2(float *base, vuint32m4_t bindex, vfloat32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f64m1x2(double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg2ei32_v_f64m1x2(double *base, vuint32mf2_t bindex, vfloat64m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f64m2x2(double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg2ei32_v_f64m2x2(double *base, vuint32m1_t bindex, vfloat64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f64m4x2(double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg2ei32_v_f64m4x2(double *base, vuint32m2_t bindex, vfloat64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf8x2(int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg2ei32_v_i8mf8x2(int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf4x2(int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg2ei32_v_i8mf4x2(int8_t *base, vuint32m1_t bindex, vint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf2x2(int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg2ei32_v_i8mf2x2(int8_t *base, vuint32m2_t bindex, vint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8m1x2(int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg2ei32_v_i8m1x2(int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8m2x2(int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg2ei32_v_i8m2x2(int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16mf4x2(int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg2ei32_v_i16mf4x2(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16mf2x2(int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg2ei32_v_i16mf2x2(int16_t *base, vuint32m1_t bindex, vint16mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m1x2(int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg2ei32_v_i16m1x2(int16_t *base, vuint32m2_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m2x2(int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg2ei32_v_i16m2x2(int16_t *base, vuint32m4_t bindex, vint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m4x2(int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg2ei32_v_i16m4x2(int16_t *base, vuint32m8_t bindex, vint16m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32mf2x2(int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg2ei32_v_i32mf2x2(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32m1x2(int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg2ei32_v_i32m1x2(int32_t *base, vuint32m1_t bindex, vint32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32m2x2(int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg2ei32_v_i32m2x2(int32_t *base, vuint32m2_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32m4x2(int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg2ei32_v_i32m4x2(int32_t *base, vuint32m4_t bindex, vint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i64m1x2(int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg2ei32_v_i64m1x2(int64_t *base, vuint32mf2_t bindex, vint64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg2ei32_v_i64m2x2(int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg2ei32_v_i64m2x2(int64_t *base, vuint32m1_t bindex, vint64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i64m4x2(int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg2ei32_v_i64m4x2(int64_t *base, vuint32m2_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf8x2(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg2ei32_v_u8mf8x2(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf4x2(uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg2ei32_v_u8mf4x2(uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf2x2(uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 
@@ void test_vsuxseg2ei32_v_u8mf2x2(uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8m1x2(uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg2ei32_v_u8m1x2(uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8m2x2(uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg2ei32_v_u8m2x2(uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16mf4x2(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg2ei32_v_u16mf4x2(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16mf2x2(uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg2ei32_v_u16mf2x2(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg2ei32_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m1x2(uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg2ei32_v_u16m1x2(uint16_t *base, vuint32m2_t bindex, vuint16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m2x2(uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg2ei32_v_u16m2x2(uint16_t *base, vuint32m4_t bindex, vuint16m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m4x2(uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg2ei32_v_u16m4x2(uint16_t *base, vuint32m8_t bindex, vuint16m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32mf2x2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg2ei32_v_u32mf2x2(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32m1x2(uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg2ei32_v_u32m1x2(uint32_t *base, vuint32m1_t bindex, vuint32m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32m2x2(uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg2ei32_v_u32m2x2(uint32_t *base, vuint32m2_t bindex, vuint32m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32m4x2(uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg2ei32_v_u32m4x2(uint32_t *base, vuint32m4_t bindex, vuint32m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m1x2(uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg2ei32_v_u64m1x2(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m2x2(uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg2ei32_v_u64m2x2(uint64_t *base, vuint32m1_t bindex, vuint64m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m4x2(uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg2ei32_v_u64m4x2(uint64_t *base, vuint32m2_t bindex, vuint64m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg2ei32_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg2ei32_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg2ei32_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg2ei32_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg2ei32_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint32m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32mf2x2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void 
test_vsuxseg2ei32_v_f32mf2x2_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m1x2_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg2ei32_v_f32m1x2_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m2x2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg2ei32_v_f32m2x2_m(vbool16_t mask, float *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m4x2_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg2ei32_v_f32m4x2_m(vbool8_t mask, float *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f64m1x2_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg2ei32_v_f64m1x2_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f64m2x2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg2ei32_v_f64m2x2_m(vbool32_t mask, double *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f64m4x2_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg2ei32_v_f64m4x2_m(vbool16_t mask, double *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg2ei32_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg2ei32_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg2ei32_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg2ei32_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg2ei32_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint32m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg2ei32_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg2ei32_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg2ei32_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg2ei32_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg2ei32_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg2ei32_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint32m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg2ei32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg2ei32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32m2x2_m(vbool16_t mask, int32_t 
*base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg2ei32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg2ei32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg2ei32_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsuxseg2ei32_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsuxseg2ei32_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsuxseg2ei32_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsuxseg2ei32_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsuxseg2ei32_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsuxseg2ei32_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsuxseg2ei32_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsuxseg2ei32_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -830,7 +830,7 @@ void test_vsuxseg2ei32_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m1x2_m // CHECK-RV64-SAME: 
( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -840,7 +840,7 @@ void test_vsuxseg2ei32_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -850,7 +850,7 @@ void test_vsuxseg2ei32_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -860,7 +860,7 @@ void test_vsuxseg2ei32_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint32m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t 
v_tuple, size_t vl) { @@ -870,7 +870,7 @@ void test_vsuxseg2ei32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -880,7 +880,7 @@ void test_vsuxseg2ei32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -890,7 +890,7 @@ void test_vsuxseg2ei32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -900,7 +900,7 @@ void test_vsuxseg2ei32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 
2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -910,7 +910,7 @@ void test_vsuxseg2ei32_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -920,7 +920,7 @@ void test_vsuxseg2ei32_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei64.c index b082b82093cb3..9184d7d6f622f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16mf4x2(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg2ei64_v_f16mf4x2(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16mf2x2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg2ei64_v_f16mf2x2(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16m1x2(_Float16 *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg2ei64_v_f16m1x2(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16m2x2(_Float16 *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg2ei64_v_f16m2x2(_Float16 *base, vuint64m8_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32mf2x2(float *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg2ei64_v_f32mf2x2(float *base, vuint64m1_t bindex, vfloat32mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32m1x2(float *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg2ei64_v_f32m1x2(float *base, vuint64m2_t bindex, vfloat32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32m2x2(float *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg2ei64_v_f32m2x2(float *base, vuint64m4_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32m4x2(float *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg2ei64_v_f32m4x2(float *base, vuint64m8_t bindex, vfloat32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m1x2(double *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg2ei64_v_f64m1x2(double *base, vuint64m1_t bindex, vfloat64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m2x2(double *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg2ei64_v_f64m2x2(double *base, vuint64m2_t bindex, vfloat64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m4x2(double *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg2ei64_v_f64m4x2(double *base, vuint64m4_t bindex, vfloat64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf8x2(int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg2ei64_v_i8mf8x2(int8_t *base, vuint64m1_t bindex, vint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf4x2(int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg2ei64_v_i8mf4x2(int8_t *base, vuint64m2_t bindex, vint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf2x2(int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg2ei64_v_i8mf2x2(int8_t *base, vuint64m4_t bindex, vint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8m1x2(int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg2ei64_v_i8m1x2(int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16mf4x2(int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg2ei64_v_i16mf4x2(int16_t *base, vuint64m1_t bindex, vint16mf4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16mf2x2(int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg2ei64_v_i16mf2x2(int16_t *base, vuint64m2_t bindex, vint16mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16m1x2(int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg2ei64_v_i16m1x2(int16_t *base, vuint64m4_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16m2x2(int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg2ei64_v_i16m2x2(int16_t *base, vuint64m8_t bindex, vint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32mf2x2(int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg2ei64_v_i32mf2x2(int32_t *base, vuint64m1_t bindex, vint32mf2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m1x2(int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg2ei64_v_i32m1x2(int32_t *base, vuint64m2_t bindex, vint32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m2x2(int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg2ei64_v_i32m2x2(int32_t *base, vuint64m4_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m4x2(int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg2ei64_v_i32m4x2(int32_t *base, vuint64m8_t bindex, vint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i64m1x2(int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg2ei64_v_i64m1x2(int64_t *base, vuint64m1_t bindex, vint64m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i64m2x2(int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg2ei64_v_i64m2x2(int64_t *base, vuint64m2_t bindex, vint64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i64m4x2(int64_t 
*base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg2ei64_v_i64m4x2(int64_t *base, vuint64m4_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf8x2(uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg2ei64_v_u8mf8x2(uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf4x2(uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg2ei64_v_u8mf4x2(uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf2x2(uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg2ei64_v_u8mf2x2(uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8m1x2(uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg2ei64_v_u8m1x2(uint8_t 
*base, vuint64m8_t bindex, vuint8m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16mf4x2(uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg2ei64_v_u16mf4x2(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16mf2x2(uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg2ei64_v_u16mf2x2(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16m1x2(uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg2ei64_v_u16m1x2(uint16_t *base, vuint64m4_t bindex, vuint16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16m2x2(uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg2ei64_v_u16m2x2(uint16_t *base, vuint64m8_t bindex, vuint16m2x2 // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg2ei64_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32mf2x2(uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg2ei64_v_u32mf2x2(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m1x2(uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg2ei64_v_u32m1x2(uint32_t *base, vuint64m2_t bindex, vuint32m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m2x2(uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg2ei64_v_u32m2x2(uint32_t *base, vuint64m4_t bindex, vuint32m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m4x2(uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg2ei64_v_u32m4x2(uint32_t *base, vuint64m8_t bindex, vuint32m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u64m1x2(uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg2ei64_v_u64m1x2(uint64_t *base, vuint64m1_t bindex, vuint64m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u64m2x2(uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg2ei64_v_u64m2x2(uint64_t *base, vuint64m2_t bindex, vuint64m2x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u64m4x2(uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg2ei64_v_u64m4x2(uint64_t *base, vuint64m4_t bindex, vuint64m4x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg2ei64_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg2ei64_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg2ei64_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg2ei64_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32mf2x2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void 
test_vsuxseg2ei64_v_f32mf2x2_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32m1x2_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg2ei64_v_f32m1x2_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32m2x2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg2ei64_v_f32m2x2_m(vbool16_t mask, float *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32m4x2_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg2ei64_v_f32m4x2_m(vbool8_t mask, float *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m1x2_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg2ei64_v_f64m1x2_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m2x2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg2ei64_v_f64m2x2_m(vbool32_t mask, double *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m4x2_m(vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg2ei64_v_f64m4x2_m(vbool16_t mask, double *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg2ei64_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg2ei64_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg2ei64_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg2ei64_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg2ei64_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg2ei64_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg2ei64_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg2ei64_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg2ei64_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg2ei64_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg2ei64_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg2ei64_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg2ei64_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i64m1x2_m(vbool64_t mask, int64_t *base, 
vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg2ei64_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg2ei64_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg2ei64_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg2ei64_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg2ei64_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg2ei64_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg2ei64_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg2ei64_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg2ei64_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsuxseg2ei64_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsuxseg2ei64_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsuxseg2ei64_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m1x2_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsuxseg2ei64_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsuxseg2ei64_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsuxseg2ei64_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, 
vuint64m1x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsuxseg2ei64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsuxseg2ei64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei8.c index 6cb00b9aaaf4f..16da657a4f9fa 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16mf4x2(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg2ei8_v_f16mf4x2(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16mf2x2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg2ei8_v_f16mf2x2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m1x2(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg2ei8_v_f16m1x2(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m2x2(_Float16 *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg2ei8_v_f16m2x2(_Float16 *base, vuint8m1_t bindex, vfloat16m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m4x2(_Float16 *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg2ei8_v_f16m4x2(_Float16 *base, vuint8m2_t bindex, vfloat16m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32mf2x2(float *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg2ei8_v_f32mf2x2(float *base, vuint8mf8_t bindex, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32m1x2(float *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg2ei8_v_f32m1x2(float *base, vuint8mf4_t bindex, vfloat32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32m2x2(float *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg2ei8_v_f32m2x2(float *base, vuint8mf2_t bindex, vfloat32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32m4x2(float *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg2ei8_v_f32m4x2(float *base, vuint8m1_t bindex, vfloat32m4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m1x2(double *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg2ei8_v_f64m1x2(double *base, vuint8mf8_t bindex, vfloat64m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m2x2(double *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg2ei8_v_f64m2x2(double *base, vuint8mf4_t bindex, vfloat64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m4x2(double *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg2ei8_v_f64m4x2(double *base, vuint8mf2_t bindex, vfloat64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf8x2(int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg2ei8_v_i8mf8x2(int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf4x2(int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg2ei8_v_i8mf4x2(int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf2x2(int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg2ei8_v_i8mf2x2(int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m1x2(int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg2ei8_v_i8m1x2(int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m2x2(int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg2ei8_v_i8m2x2(int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m4x2(int8_t *base, vuint8m4_t bindex, vint8m4x2_t 
v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg2ei8_v_i8m4x2(int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16mf4x2(int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg2ei8_v_i16mf4x2(int16_t *base, vuint8mf8_t bindex, vint16mf4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16mf2x2(int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg2ei8_v_i16mf2x2(int16_t *base, vuint8mf4_t bindex, vint16mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m1x2(int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg2ei8_v_i16m1x2(int16_t *base, vuint8mf2_t bindex, vint16m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m2x2(int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg2ei8_v_i16m2x2(int16_t *base, vuint8m1_t bindex, vint16m2x2_t v // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m4x2(int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg2ei8_v_i16m4x2(int16_t *base, vuint8m2_t bindex, vint16m4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32mf2x2(int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg2ei8_v_i32mf2x2(int32_t *base, vuint8mf8_t bindex, vint32mf2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m1x2(int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg2ei8_v_i32m1x2(int32_t *base, vuint8mf4_t bindex, vint32m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m2x2(int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg2ei8_v_i32m2x2(int32_t *base, vuint8mf2_t bindex, vint32m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m4x2(int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg2ei8_v_i32m4x2(int32_t *base, vuint8m1_t bindex, vint32m4x2_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m1x2(int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg2ei8_v_i64m1x2(int64_t *base, vuint8mf8_t bindex, vint64m1x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m2x2(int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg2ei8_v_i64m2x2(int64_t *base, vuint8mf4_t bindex, vint64m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m4x2(int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg2ei8_v_i64m4x2(int64_t *base, vuint8mf2_t bindex, vint64m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf8x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf8x2(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg2ei8_v_u8mf8x2(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf4x2(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg2ei8_v_u8mf4x2(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf2x2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg2ei8_v_u8mf2x2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m1x2(uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg2ei8_v_u8m1x2(uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m2x2(uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg2ei8_v_u8m2x2(uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m4x2(uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg2ei8_v_u8m4x2(uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16mf4x2(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg2ei8_v_u16mf4x2(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16mf2x2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg2ei8_v_u16mf2x2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16m1x2(uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg2ei8_v_u16m1x2(uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16m2x2(uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg2ei8_v_u16m2x2(uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16m4x2(uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg2ei8_v_u16m4x2(uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32mf2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32mf2x2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg2ei8_v_u32mf2x2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32m1x2(uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg2ei8_v_u32m1x2(uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32m2x2(uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg2ei8_v_u32m2x2(uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32m4x2(uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg2ei8_v_u32m4x2(uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m1x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u64m1x2(uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg2ei8_v_u64m1x2(uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m2x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u64m2x2(uint64_t *base, vuint8mf4_t 
bindex, vuint64m2x2_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg2ei8_v_u64m2x2(uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m4x2 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u64m4x2(uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg2ei8_v_u64m4x2(uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg2ei8_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg2ei8_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], 
i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg2ei8_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg2ei8_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg2ei8_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint8m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32mf2x2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg2ei8_v_f32mf2x2_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32m1x2_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg2ei8_v_f32m1x2_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32m2x2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg2ei8_v_f32m2x2_m(vbool16_t mask, float *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32m4x2_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg2ei8_v_f32m4x2_m(vbool8_t mask, float *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m1x2_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg2ei8_v_f64m1x2_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m2x2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg2ei8_v_f64m2x2_m(vbool32_t mask, double *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m4x2_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg2ei8_v_f64m4x2_m(vbool16_t mask, double *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg2ei8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg2ei8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg2ei8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg2ei8_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg2ei8_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void 
test_vsuxseg2ei8_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg2ei8_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg2ei8_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg2ei8_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg2ei8_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg2ei8_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint8m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg2ei8_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg2ei8_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) { @@ -750,7 +750,7 @@ void test_vsuxseg2ei8_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) { @@ -760,7 +760,7 @@ void test_vsuxseg2ei8_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) { @@ -770,7 +770,7 @@ void test_vsuxseg2ei8_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) { @@ -780,7 +780,7 @@ void test_vsuxseg2ei8_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) { @@ -790,7 +790,7 @@ void test_vsuxseg2ei8_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf8x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { @@ -800,7 +800,7 @@ void test_vsuxseg2ei8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { @@ -810,7 +810,7 @@ void test_vsuxseg2ei8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { @@ -820,7 +820,7 @@ void test_vsuxseg2ei8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m1x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) { @@ -830,7 +830,7 @@ void test_vsuxseg2ei8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m2x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) { @@ -840,7 +840,7 @@ void test_vsuxseg2ei8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) { @@ -850,7 +850,7 @@ void test_vsuxseg2ei8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf4x2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { @@ -860,7 +860,7 @@ void 
test_vsuxseg2ei8_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf2x2_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) {
@@ -870,7 +870,7 @@ void test_vsuxseg2ei8_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m1x2_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) {
@@ -880,7 +880,7 @@ void test_vsuxseg2ei8_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m2x2_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) {
@@ -890,7 +890,7 @@ void test_vsuxseg2ei8_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bind
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m4x2_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) {
@@ -900,7 +900,7 @@ void test_vsuxseg2ei8_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint8m2_t bind
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32mf2x2_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) {
@@ -910,7 +910,7 @@ void test_vsuxseg2ei8_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m1x2_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
@@ -920,7 +920,7 @@ void test_vsuxseg2ei8_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m2x2_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
@@ -930,7 +930,7 @@ void test_vsuxseg2ei8_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bi
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m4x2_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
@@ -940,7 +940,7 @@ void test_vsuxseg2ei8_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint8m1_t bind
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m1x2_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
@@ -950,7 +950,7 @@ void test_vsuxseg2ei8_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bi
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m2x2_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
@@ -960,7 +960,7 @@ void test_vsuxseg2ei8_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bi
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m4x2_m
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 2) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg2ei8_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei16.c
index 66637eea967a6..7889f09ce38b7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei16.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf4x3
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_f16mf4x3(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) {
@@ -20,7 +20,7 @@ void test_vsuxseg3ei16_v_f16mf4x3(_Float16 *base, vuint16mf4_t bindex, vfloat16m
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf2x3
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_f16mf2x3(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) {
@@ -30,7 +30,7 @@ void test_vsuxseg3ei16_v_f16mf2x3(_Float16 *base, vuint16mf2_t bindex, vfloat16m
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m1x3
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_f16m1x3(_Float16 *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) {
@@ -40,7 +40,7 @@ void test_vsuxseg3ei16_v_f16m1x3(_Float16 *base, vuint16m1_t bindex, vfloat16m1x
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m2x3
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret void
//
void test_vsuxseg3ei16_v_f16m2x3(_Float16 *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) {
@@ -50,7 +50,7 @@ void test_vsuxseg3ei16_v_f16m2x3(_Float16 *base, vuint16m2_t bindex,
vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32mf2x3(float *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg3ei16_v_f32mf2x3(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32m1x3(float *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg3ei16_v_f32m1x3(float *base, vuint16mf2_t bindex, vfloat32m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32m2x3(float *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg3ei16_v_f32m2x3(float *base, vuint16m1_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f64m1x3(double *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg3ei16_v_f64m1x3(double *base, vuint16mf4_t bindex, vfloat64m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f64m2x3(double *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg3ei16_v_f64m2x3(double *base, vuint16mf2_t bindex, vfloat64m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf8x3(int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg3ei16_v_i8mf8x3(int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf4x3(int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg3ei16_v_i8mf4x3(int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf2x3(int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg3ei16_v_i8mf2x3(int8_t *base, vuint16m1_t bindex, vint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8m1x3(int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg3ei16_v_i8m1x3(int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8m2x3(int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg3ei16_v_i8m2x3(int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16mf4x3(int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg3ei16_v_i16mf4x3(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16mf2x3(int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg3ei16_v_i16mf2x3(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16m1x3(int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg3ei16_v_i16m1x3(int16_t *base, vuint16m1_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16m2x3(int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg3ei16_v_i16m2x3(int16_t *base, vuint16m2_t bindex, vint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i32mf2x3(int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg3ei16_v_i32mf2x3(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i32m1x3(int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg3ei16_v_i32m1x3(int32_t *base, vuint16mf2_t bindex, vint32m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i32m2x3(int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg3ei16_v_i32m2x3(int32_t *base, vuint16m1_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i64m1x3(int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg3ei16_v_i64m1x3(int64_t *base, vuint16mf4_t bindex, vint64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i64m2x3(int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg3ei16_v_i64m2x3(int64_t *base, vuint16mf2_t bindex, vint64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf8x3(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg3ei16_v_u8mf8x3(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf4x3(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg3ei16_v_u8mf4x3(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf2x3(uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg3ei16_v_u8mf2x3(uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8m1x3(uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg3ei16_v_u8m1x3(uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8m2x3(uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg3ei16_v_u8m2x3(uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16mf4x3(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg3ei16_v_u16mf4x3(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16mf2x3(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg3ei16_v_u16mf2x3(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16m1x3(uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg3ei16_v_u16m1x3(uint16_t *base, vuint16m1_t bindex, vuint16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16m2x3(uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg3ei16_v_u16m2x3(uint16_t *base, vuint16m2_t bindex, vuint16m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg3ei16_v_u32mf2x3(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg3ei16_v_u32mf2x3(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u32m1x3(uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg3ei16_v_u32m1x3(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u32m2x3(uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg3ei16_v_u32m2x3(uint32_t *base, vuint16m1_t bindex, vuint32m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u64m1x3(uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg3ei16_v_u64m1x3(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u64m2x3(uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ 
-380,7 +380,7 @@ void test_vsuxseg3ei16_v_u64m2x3(uint64_t *base, vuint16mf2_t bindex, vuint64m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg3ei16_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg3ei16_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg3ei16_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg3ei16_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32mf2x3_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg3ei16_v_f32mf2x3_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32m1x3_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg3ei16_v_f32m1x3_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32m2x3_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg3ei16_v_f32m2x3_m(vbool16_t mask, float *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f64m1x3_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg3ei16_v_f64m1x3_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f64m2x3_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg3ei16_v_f64m2x3_m(vbool32_t mask, double *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg3ei16_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg3ei16_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg3ei16_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg3ei16_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg3ei16_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint16m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg3ei16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint16mf4_t // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg3ei16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg3ei16_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg3ei16_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg3ei16_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg3ei16_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg3ei16_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg3ei16_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg3ei16_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg3ei16_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg3ei16_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg3ei16_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg3ei16_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg3ei16_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg3ei16_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg3ei16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg3ei16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m1x3_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg3ei16_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg3ei16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg3ei16_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, 
vuint32m1x3_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg3ei16_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg3ei16_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg3ei16_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei32.c index 35416daa7a3cd..42663370dc798 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16mf4x3(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg3ei32_v_f16mf4x3(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16mf2x3(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg3ei32_v_f16mf2x3(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16m1x3(_Float16 *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg3ei32_v_f16m1x3(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16m2x3(_Float16 *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg3ei32_v_f16m2x3(_Float16 *base, vuint32m4_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f32mf2x3(float *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg3ei32_v_f32mf2x3(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f32m1x3(float *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg3ei32_v_f32m1x3(float *base, vuint32m1_t bindex, vfloat32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f32m2x3(float *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg3ei32_v_f32m2x3(float *base, vuint32m2_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f64m1x3(double *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg3ei32_v_f64m1x3(double *base, vuint32mf2_t bindex, vfloat64m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f64m2x3(double *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg3ei32_v_f64m2x3(double *base, vuint32m1_t bindex, vfloat64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf8x3(int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg3ei32_v_i8mf8x3(int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf4x3(int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg3ei32_v_i8mf4x3(int8_t *base, vuint32m1_t bindex, vint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf2x3(int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg3ei32_v_i8mf2x3(int8_t *base, vuint32m2_t bindex, vint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8m1x3(int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg3ei32_v_i8m1x3(int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8m2x3(int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg3ei32_v_i8m2x3(int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16mf4x3(int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg3ei32_v_i16mf4x3(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16mf2x3(int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg3ei32_v_i16mf2x3(int16_t *base, vuint32m1_t bindex, vint16mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16m1x3(int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg3ei32_v_i16m1x3(int16_t *base, vuint32m2_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16m2x3(int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg3ei32_v_i16m2x3(int16_t *base, vuint32m4_t bindex, vint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32mf2x3(int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg3ei32_v_i32mf2x3(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32m1x3(int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg3ei32_v_i32m1x3(int32_t *base, vuint32m1_t bindex, vint32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32m2x3(int32_t 
*base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg3ei32_v_i32m2x3(int32_t *base, vuint32m2_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i64m1x3(int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg3ei32_v_i64m1x3(int64_t *base, vuint32mf2_t bindex, vint64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i64m2x3(int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg3ei32_v_i64m2x3(int64_t *base, vuint32m1_t bindex, vint64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf8x3(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg3ei32_v_u8mf8x3(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf4x3(uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void 
test_vsuxseg3ei32_v_u8mf4x3(uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf2x3(uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg3ei32_v_u8mf2x3(uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8m1x3(uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg3ei32_v_u8m1x3(uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8m2x3(uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg3ei32_v_u8m2x3(uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16mf4x3(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg3ei32_v_u16mf4x3(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local 
void @test_vsuxseg3ei32_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16mf2x3(uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg3ei32_v_u16mf2x3(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16m1x3(uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg3ei32_v_u16m1x3(uint16_t *base, vuint32m2_t bindex, vuint16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16m2x3(uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg3ei32_v_u16m2x3(uint16_t *base, vuint32m4_t bindex, vuint16m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32mf2x3(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg3ei32_v_u32mf2x3(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32m1x3(uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg3ei32_v_u32m1x3(uint32_t *base, vuint32m1_t bindex, vuint32m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32m2x3(uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg3ei32_v_u32m2x3(uint32_t *base, vuint32m2_t bindex, vuint32m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u64m1x3(uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg3ei32_v_u64m1x3(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u64m2x3(uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg3ei32_v_u64m2x3(uint64_t *base, vuint32m1_t bindex, vuint64m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg3ei32_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg3ei32_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg3ei32_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg3ei32_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint32m4_t bi // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg3ei32_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f32mf2x3_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg3ei32_v_f32mf2x3_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f32m1x3_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg3ei32_v_f32m1x3_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f32m2x3_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg3ei32_v_f32m2x3_m(vbool16_t mask, float *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f64m1x3_m(vbool64_t mask, 
double *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg3ei32_v_f64m1x3_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f64m2x3_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg3ei32_v_f64m2x3_m(vbool32_t mask, double *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg3ei32_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg3ei32_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg3ei32_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg3ei32_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg3ei32_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint32m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg3ei32_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg3ei32_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg3ei32_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg3ei32_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg3ei32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m1x3_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg3ei32_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg3ei32_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg3ei32_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) 
{ @@ -610,7 +610,7 @@ void test_vsuxseg3ei32_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg3ei32_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg3ei32_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg3ei32_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg3ei32_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg3ei32_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg3ei32_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg3ei32_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg3ei32_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg3ei32_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg3ei32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg3ei32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg3ei32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg3ei32_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei64.c index b706a9666d0d1..5619f8729d2c3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16mf4x3(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg3ei64_v_f16mf4x3(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16mf2x3(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg3ei64_v_f16mf2x3(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16m1x3(_Float16 *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg3ei64_v_f16m1x3(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16m2x3(_Float16 *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg3ei64_v_f16m2x3(_Float16 *base, vuint64m8_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f32mf2x3(float *base, 
vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg3ei64_v_f32mf2x3(float *base, vuint64m1_t bindex, vfloat32mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f32m1x3(float *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg3ei64_v_f32m1x3(float *base, vuint64m2_t bindex, vfloat32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f32m2x3(float *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg3ei64_v_f32m2x3(float *base, vuint64m4_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f64m1x3(double *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg3ei64_v_f64m1x3(double *base, vuint64m1_t bindex, vfloat64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f64m2x3(double *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg3ei64_v_f64m2x3(double *base, 
vuint64m2_t bindex, vfloat64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf8x3(int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg3ei64_v_i8mf8x3(int8_t *base, vuint64m1_t bindex, vint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf4x3(int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg3ei64_v_i8mf4x3(int8_t *base, vuint64m2_t bindex, vint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf2x3(int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg3ei64_v_i8mf2x3(int8_t *base, vuint64m4_t bindex, vint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8m1x3(int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg3ei64_v_i8m1x3(int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf4x3 // CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16mf4x3(int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg3ei64_v_i16mf4x3(int16_t *base, vuint64m1_t bindex, vint16mf4x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16mf2x3(int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg3ei64_v_i16mf2x3(int16_t *base, vuint64m2_t bindex, vint16mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16m1x3(int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg3ei64_v_i16m1x3(int16_t *base, vuint64m4_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16m2x3(int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg3ei64_v_i16m2x3(int16_t *base, vuint64m8_t bindex, vint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32mf2x3(int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg3ei64_v_i32mf2x3(int32_t *base, vuint64m1_t bindex, vint32mf2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32m1x3(int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg3ei64_v_i32m1x3(int32_t *base, vuint64m2_t bindex, vint32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32m2x3(int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg3ei64_v_i32m2x3(int32_t *base, vuint64m4_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i64m1x3(int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg3ei64_v_i64m1x3(int64_t *base, vuint64m1_t bindex, vint64m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i64m2x3(int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg3ei64_v_i64m2x3(int64_t *base, vuint64m2_t bindex, vint64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf8x3(uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg3ei64_v_u8mf8x3(uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf4x3(uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg3ei64_v_u8mf4x3(uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf2x3(uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg3ei64_v_u8mf2x3(uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8m1x3(uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg3ei64_v_u8m1x3(uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16mf4x3(uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg3ei64_v_u16mf4x3(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16mf2x3(uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg3ei64_v_u16mf2x3(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16m1x3(uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg3ei64_v_u16m1x3(uint16_t *base, vuint64m4_t bindex, vuint16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16m2x3(uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg3ei64_v_u16m2x3(uint16_t *base, vuint64m8_t bindex, vuint16m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32mf2x3(uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg3ei64_v_u32mf2x3(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32m1x3(uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg3ei64_v_u32m1x3(uint32_t *base, vuint64m2_t bindex, vuint32m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32m2x3(uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg3ei64_v_u32m2x3(uint32_t *base, vuint64m4_t bindex, vuint32m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u64m1x3(uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg3ei64_v_u64m1x3(uint64_t *base, vuint64m1_t bindex, vuint64m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u64m2x3(uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg3ei64_v_u64m2x3(uint64_t *base, vuint64m2_t bindex, vuint64m2x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg3ei64_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg3ei64_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg3ei64_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg3ei64_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f32mf2x3_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg3ei64_v_f32mf2x3_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f32m1x3_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg3ei64_v_f32m1x3_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f32m2x3_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg3ei64_v_f32m2x3_m(vbool16_t mask, float *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f64m1x3_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg3ei64_v_f64m1x3_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f64m2x3_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg3ei64_v_f64m2x3_m(vbool32_t mask, double *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg3ei64_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf4x3_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg3ei64_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg3ei64_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg3ei64_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ 
-500,7 +500,7 @@ void test_vsuxseg3ei64_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg3ei64_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg3ei64_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg3ei64_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg3ei64_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg3ei64_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg3ei64_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg3ei64_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg3ei64_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg3ei64_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg3ei64_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg3ei64_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg3ei64_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg3ei64_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg3ei64_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg3ei64_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg3ei64_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg3ei64_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg3ei64_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg3ei64_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32m2x3_m(vbool16_t 
mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg3ei64_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg3ei64_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei8.c index 8d025c252fc38..ea0534365ddc3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16mf4x3(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg3ei8_v_f16mf4x3(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16mf2x3(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg3ei8_v_f16mf2x3(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16m1x3(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg3ei8_v_f16m1x3(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x3 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16m2x3(_Float16 *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg3ei8_v_f16m2x3(_Float16 *base, vuint8m1_t bindex, vfloat16m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f32mf2x3(float *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg3ei8_v_f32mf2x3(float *base, vuint8mf8_t bindex, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f32m1x3(float *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg3ei8_v_f32m1x3(float *base, vuint8mf4_t bindex, vfloat32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f32m2x3(float *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg3ei8_v_f32m2x3(float *base, vuint8mf2_t bindex, vfloat32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f64m1x3(double *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg3ei8_v_f64m1x3(double *base, vuint8mf8_t bindex, vfloat64m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f64m2x3(double *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg3ei8_v_f64m2x3(double *base, vuint8mf4_t bindex, vfloat64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf8x3(int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg3ei8_v_i8mf8x3(int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf4x3(int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg3ei8_v_i8mf4x3(int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf2x3(int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg3ei8_v_i8mf2x3(int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8m1x3(int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg3ei8_v_i8m1x3(int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8m2x3(int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg3ei8_v_i8m2x3(int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16mf4x3(int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg3ei8_v_i16mf4x3(int16_t *base, vuint8mf8_t bindex, vint16mf4x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16mf2x3(int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg3ei8_v_i16mf2x3(int16_t *base, vuint8mf4_t bindex, vint16mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16m1x3(int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg3ei8_v_i16m1x3(int16_t *base, vuint8mf2_t bindex, vint16m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16m2x3(int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, 
size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg3ei8_v_i16m2x3(int16_t *base, vuint8m1_t bindex, vint16m2x3_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32mf2x3(int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg3ei8_v_i32mf2x3(int32_t *base, vuint8mf8_t bindex, vint32mf2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32m1x3(int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg3ei8_v_i32m1x3(int32_t *base, vuint8mf4_t bindex, vint32m1x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32m2x3(int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg3ei8_v_i32m2x3(int32_t *base, vuint8mf2_t bindex, vint32m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i64m1x3(int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg3ei8_v_i64m1x3(int64_t *base, vuint8mf8_t bindex, vint64m1x3_t // CHECK-RV64-LABEL: 
define dso_local void @test_vsuxseg3ei8_v_i64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i64m2x3(int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg3ei8_v_i64m2x3(int64_t *base, vuint8mf4_t bindex, vint64m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf8x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8mf8x3(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg3ei8_v_u8mf8x3(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8mf4x3(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg3ei8_v_u8mf4x3(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8mf2x3(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg3ei8_v_u8mf2x3(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8m1x3(uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg3ei8_v_u8m1x3(uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8m2x3(uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg3ei8_v_u8m2x3(uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf4x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16mf4x3(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg3ei8_v_u16mf4x3(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16mf2x3(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg3ei8_v_u16mf2x3(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16m1x3(uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg3ei8_v_u16m1x3(uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16m2x3(uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg3ei8_v_u16m2x3(uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32mf2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32mf2x3(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg3ei8_v_u32mf2x3(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32m1x3(uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg3ei8_v_u32m1x3(uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32m2x3(uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg3ei8_v_u32m2x3(uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m1x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u64m1x3(uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg3ei8_v_u64m1x3(uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m2x3 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u64m2x3(uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg3ei8_v_u64m2x3(uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg3ei8_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg3ei8_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg3ei8_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg3ei8_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f32mf2x3_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg3ei8_v_f32mf2x3_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f32m1x3_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg3ei8_v_f32m1x3_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f32m2x3_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg3ei8_v_f32m2x3_m(vbool16_t mask, float *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f64m1x3_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg3ei8_v_f64m1x3_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f64m2x3_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg3ei8_v_f64m2x3_m(vbool32_t mask, double *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg3ei8_v_i8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg3ei8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg3ei8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg3ei8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, 
vint8m1x3_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg3ei8_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg3ei8_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg3ei8_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg3ei8_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg3ei8_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg3ei8_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg3ei8_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg3ei8_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg3ei8_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg3ei8_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg3ei8_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf8x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg3ei8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg3ei8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg3ei8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg3ei8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg3ei8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg3ei8_v_u16mf4x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg3ei8_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg3ei8_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg3ei8_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16m2x3_m(vbool8_t mask, uint16_t *base, 
vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg3ei8_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32mf2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg3ei8_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg3ei8_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg3ei8_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m1x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg3ei8_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m2x3_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 3) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei16.c index ced4e074aefca..f0156c8bc5ef7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16mf4x4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg4ei16_v_f16mf4x4(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16mf2x4(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg4ei16_v_f16mf2x4(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m1x4 // CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16m1x4(_Float16 *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg4ei16_v_f16m1x4(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16m2x4(_Float16 *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg4ei16_v_f16m2x4(_Float16 *base, vuint16m2_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f32mf2x4(float *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg4ei16_v_f32mf2x4(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f32m1x4(float *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg4ei16_v_f32m1x4(float *base, vuint16mf2_t bindex, vfloat32m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f32m2x4(float *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg4ei16_v_f32m2x4(float *base, vuint16m1_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f64m1x4(double *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg4ei16_v_f64m1x4(double *base, vuint16mf4_t bindex, vfloat64m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f64m2x4(double *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg4ei16_v_f64m2x4(double *base, vuint16mf2_t bindex, vfloat64m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8mf8x4(int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg4ei16_v_i8mf8x4(int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8mf4x4(int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg4ei16_v_i8mf4x4(int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8mf2x4(int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg4ei16_v_i8mf2x4(int8_t *base, vuint16m1_t bindex, vint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8m1x4(int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg4ei16_v_i8m1x4(int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8m2x4(int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg4ei16_v_i8m2x4(int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16mf4x4(int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg4ei16_v_i16mf4x4(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16mf2x4(int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg4ei16_v_i16mf2x4(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16m1x4(int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg4ei16_v_i16m1x4(int16_t *base, vuint16m1_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16m2x4(int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg4ei16_v_i16m2x4(int16_t *base, vuint16m2_t bindex, vint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32mf2x4(int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg4ei16_v_i32mf2x4(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32m1x4(int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg4ei16_v_i32m1x4(int32_t *base, vuint16mf2_t bindex, vint32m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32m2x4(int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg4ei16_v_i32m2x4(int32_t *base, vuint16m1_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i64m1x4(int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg4ei16_v_i64m1x4(int64_t *base, vuint16mf4_t bindex, vint64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i64m2x4(int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg4ei16_v_i64m2x4(int64_t *base, vuint16mf2_t bindex, vint64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf8x4(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg4ei16_v_u8mf8x4(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf4x4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg4ei16_v_u8mf4x4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf2x4(uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg4ei16_v_u8mf2x4(uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8m1x4(uint8_t 
*base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg4ei16_v_u8m1x4(uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8m2x4(uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg4ei16_v_u8m2x4(uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16mf4x4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg4ei16_v_u16mf4x4(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16mf2x4(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg4ei16_v_u16mf2x4(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16m1x4(uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void 
test_vsuxseg4ei16_v_u16m1x4(uint16_t *base, vuint16m1_t bindex, vuint16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16m2x4(uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg4ei16_v_u16m2x4(uint16_t *base, vuint16m2_t bindex, vuint16m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u32mf2x4(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg4ei16_v_u32mf2x4(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u32m1x4(uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg4ei16_v_u32m1x4(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u32m2x4(uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg4ei16_v_u32m2x4(uint32_t *base, vuint16m1_t bindex, vuint32m2x4 // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg4ei16_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u64m1x4(uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg4ei16_v_u64m1x4(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u64m2x4(uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg4ei16_v_u64m2x4(uint64_t *base, vuint16mf2_t bindex, vuint64m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg4ei16_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg4ei16_v_f16mf2x4_m(vbool32_t mask, 
_Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg4ei16_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg4ei16_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f32mf2x4_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg4ei16_v_f32mf2x4_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: 
ret void // void test_vsuxseg4ei16_v_f32m1x4_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg4ei16_v_f32m1x4_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f32m2x4_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg4ei16_v_f32m2x4_m(vbool16_t mask, float *base, vuint16m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f64m1x4_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg4ei16_v_f64m1x4_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f64m2x4_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg4ei16_v_f64m2x4_m(vbool32_t mask, double *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg4ei16_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg4ei16_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg4ei16_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg4ei16_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg4ei16_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint16m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg4ei16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg4ei16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg4ei16_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m2x4_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg4ei16_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint16m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg4ei16_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg4ei16_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t 
vl) { @@ -590,7 +590,7 @@ void test_vsuxseg4ei16_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg4ei16_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg4ei16_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg4ei16_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg4ei16_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg4ei16_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg4ei16_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg4ei16_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint16m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg4ei16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg4ei16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg4ei16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg4ei16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint16m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg4ei16_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg4ei16_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg4ei16_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg4ei16_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint16mf4_t 
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei32.c index 5bcab9cf1a7af..4441ccedaa3da 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16mf4x4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg4ei32_v_f16mf4x4(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16mf2x4(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg4ei32_v_f16mf2x4(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16m1x4(_Float16 *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg4ei32_v_f16m1x4(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16m2x4(_Float16 *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg4ei32_v_f16m2x4(_Float16 *base, vuint32m4_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32mf2x4(float *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg4ei32_v_f32mf2x4(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32m1x4(float *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg4ei32_v_f32m1x4(float *base, vuint32m1_t bindex, vfloat32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32m2x4(float *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg4ei32_v_f32m2x4(float *base, vuint32m2_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f64m1x4(double *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg4ei32_v_f64m1x4(double *base, vuint32mf2_t bindex, vfloat64m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f64m2x4(double *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg4ei32_v_f64m2x4(double *base, vuint32m1_t bindex, vfloat64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf8x4(int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg4ei32_v_i8mf8x4(int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf4x4(int8_t *base, 
vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg4ei32_v_i8mf4x4(int8_t *base, vuint32m1_t bindex, vint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf2x4(int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg4ei32_v_i8mf2x4(int8_t *base, vuint32m2_t bindex, vint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8m1x4(int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg4ei32_v_i8m1x4(int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8m2x4(int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg4ei32_v_i8m2x4(int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16mf4x4(int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg4ei32_v_i16mf4x4(int16_t *base, 
vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16mf2x4(int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg4ei32_v_i16mf2x4(int16_t *base, vuint32m1_t bindex, vint16mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16m1x4(int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg4ei32_v_i16m1x4(int16_t *base, vuint32m2_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16m2x4(int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg4ei32_v_i16m2x4(int16_t *base, vuint32m4_t bindex, vint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32mf2x4(int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg4ei32_v_i32mf2x4(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m1x4 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32m1x4(int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg4ei32_v_i32m1x4(int32_t *base, vuint32m1_t bindex, vint32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32m2x4(int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg4ei32_v_i32m2x4(int32_t *base, vuint32m2_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i64m1x4(int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg4ei32_v_i64m1x4(int64_t *base, vuint32mf2_t bindex, vint64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i64m2x4(int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg4ei32_v_i64m2x4(int64_t *base, vuint32m1_t bindex, vint64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf8x4(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg4ei32_v_u8mf8x4(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf4x4(uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg4ei32_v_u8mf4x4(uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf2x4(uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg4ei32_v_u8mf2x4(uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8m1x4(uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg4ei32_v_u8m1x4(uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8m2x4(uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg4ei32_v_u8m2x4(uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16mf4x4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg4ei32_v_u16mf4x4(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16mf2x4(uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg4ei32_v_u16mf2x4(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16m1x4(uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg4ei32_v_u16m1x4(uint16_t *base, vuint32m2_t bindex, vuint16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16m2x4(uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg4ei32_v_u16m2x4(uint16_t *base, vuint32m4_t bindex, vuint16m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u32mf2x4(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg4ei32_v_u32mf2x4(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u32m1x4(uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg4ei32_v_u32m1x4(uint32_t *base, vuint32m1_t bindex, vuint32m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u32m2x4(uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg4ei32_v_u32m2x4(uint32_t *base, vuint32m2_t bindex, vuint32m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u64m1x4(uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg4ei32_v_u64m1x4(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u64m2x4(uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg4ei32_v_u64m2x4(uint64_t *base, vuint32m1_t bindex, vuint64m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg4ei32_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg4ei32_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg4ei32_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg4ei32_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32mf2x4_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg4ei32_v_f32mf2x4_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32m1x4_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg4ei32_v_f32m1x4_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32m2x4_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg4ei32_v_f32m2x4_m(vbool16_t mask, float *base, vuint32m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f64m1x4_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg4ei32_v_f64m1x4_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f64m2x4_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg4ei32_v_f64m2x4_m(vbool32_t mask, double *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg4ei32_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg4ei32_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg4ei32_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg4ei32_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg4ei32_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg4ei32_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint32m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg4ei32_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg4ei32_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg4ei32_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg4ei32_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint32m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg4ei32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg4ei32_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg4ei32_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg4ei32_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg4ei32_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg4ei32_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg4ei32_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf2x4_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg4ei32_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg4ei32_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg4ei32_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint32m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t 
vl) { @@ -670,7 +670,7 @@ void test_vsuxseg4ei32_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg4ei32_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg4ei32_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg4ei32_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint32m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg4ei32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg4ei32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg4ei32_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg4ei32_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei64.c index e39b3bd2d6ae3..8ebcfb23623f0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16mf4x4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg4ei64_v_f16mf4x4(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16mf2x4(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg4ei64_v_f16mf2x4(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16m1x4(_Float16 *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg4ei64_v_f16m1x4(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m2x4 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16m2x4(_Float16 *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg4ei64_v_f16m2x4(_Float16 *base, vuint64m8_t bindex, vfloat16m2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f32mf2x4(float *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg4ei64_v_f32mf2x4(float *base, vuint64m1_t bindex, vfloat32mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f32m1x4(float *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg4ei64_v_f32m1x4(float *base, vuint64m2_t bindex, vfloat32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f32m2x4(float *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg4ei64_v_f32m2x4(float *base, vuint64m4_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f64m1x4(double *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg4ei64_v_f64m1x4(double *base, vuint64m1_t bindex, vfloat64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f64m2x4(double *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg4ei64_v_f64m2x4(double *base, vuint64m2_t bindex, vfloat64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8mf8x4(int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg4ei64_v_i8mf8x4(int8_t *base, vuint64m1_t bindex, vint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8mf4x4(int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg4ei64_v_i8mf4x4(int8_t *base, vuint64m2_t bindex, vint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8mf2x4(int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg4ei64_v_i8mf2x4(int8_t *base, vuint64m4_t bindex, vint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8m1x4(int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg4ei64_v_i8m1x4(int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16mf4x4(int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg4ei64_v_i16mf4x4(int16_t *base, vuint64m1_t bindex, vint16mf4x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16mf2x4(int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg4ei64_v_i16mf2x4(int16_t *base, vuint64m2_t bindex, vint16mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16m1x4(int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg4ei64_v_i16m1x4(int16_t *base, vuint64m4_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16m2x4(int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg4ei64_v_i16m2x4(int16_t *base, vuint64m8_t bindex, vint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i32mf2x4(int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg4ei64_v_i32mf2x4(int32_t *base, vuint64m1_t bindex, vint32mf2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i32m1x4(int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg4ei64_v_i32m1x4(int32_t *base, vuint64m2_t bindex, vint32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i32m2x4(int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg4ei64_v_i32m2x4(int32_t *base, vuint64m4_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i64m1x4(int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg4ei64_v_i64m1x4(int64_t *base, vuint64m1_t bindex, vint64m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i64m2x4(int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg4ei64_v_i64m2x4(int64_t *base, vuint64m2_t bindex, vint64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8mf8x4(uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg4ei64_v_u8mf8x4(uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8mf4x4(uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg4ei64_v_u8mf4x4(uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8mf2x4(uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg4ei64_v_u8mf2x4(uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8m1x4(uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg4ei64_v_u8m1x4(uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16mf4x4(uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg4ei64_v_u16mf4x4(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16mf2x4(uint16_t 
*base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg4ei64_v_u16mf2x4(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16m1x4(uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg4ei64_v_u16m1x4(uint16_t *base, vuint64m4_t bindex, vuint16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16m2x4(uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg4ei64_v_u16m2x4(uint16_t *base, vuint64m8_t bindex, vuint16m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32mf2x4(uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg4ei64_v_u32mf2x4(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32m1x4(uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void 
test_vsuxseg4ei64_v_u32m1x4(uint32_t *base, vuint64m2_t bindex, vuint32m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32m2x4(uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg4ei64_v_u32m2x4(uint32_t *base, vuint64m4_t bindex, vuint32m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u64m1x4(uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg4ei64_v_u64m1x4(uint64_t *base, vuint64m1_t bindex, vuint64m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u64m2x4(uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg4ei64_v_u64m2x4(uint64_t *base, vuint64m2_t bindex, vuint64m2x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void 
test_vsuxseg4ei64_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg4ei64_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg4ei64_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg4ei64_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f32mf2x4_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg4ei64_v_f32mf2x4_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f32m1x4_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg4ei64_v_f32m1x4_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f32m2x4_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg4ei64_v_f32m2x4_m(vbool16_t mask, float *base, vuint64m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f64m1x4_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg4ei64_v_f64m1x4_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f64m2x4_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg4ei64_v_f64m2x4_m(vbool32_t mask, double *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg4ei64_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg4ei64_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg4ei64_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg4ei64_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg4ei64_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg4ei64_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg4ei64_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg4ei64_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -530,7 +530,7 @@ void test_vsuxseg4ei64_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint64m8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg4ei64_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg4ei64_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i32m2x4_m(vbool16_t mask, int32_t *base, 
vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg4ei64_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg4ei64_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg4ei64_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg4ei64_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg4ei64_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg4ei64_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg4ei64_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg4ei64_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg4ei64_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg4ei64_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg4ei64_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint64m8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg4ei64_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m1x4_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg4ei64_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg4ei64_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg4ei64_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, 
vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei8.c index c732a4b430a84..6caee3c0e4f69 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16mf4x4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg4ei8_v_f16mf4x4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16mf2x4(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg4ei8_v_f16mf2x4(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16m1x4(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg4ei8_v_f16m1x4(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16m2x4(_Float16 *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg4ei8_v_f16m2x4(_Float16 *base, vuint8m1_t bindex, vfloat16m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32mf2x4(float *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg4ei8_v_f32mf2x4(float *base, vuint8mf8_t bindex, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32m1x4(float *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg4ei8_v_f32m1x4(float *base, vuint8mf4_t bindex, vfloat32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32m2x4(float *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg4ei8_v_f32m2x4(float *base, vuint8mf2_t bindex, vfloat32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f64m1x4(double *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg4ei8_v_f64m1x4(double *base, vuint8mf8_t bindex, vfloat64m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f64m2x4(double *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg4ei8_v_f64m2x4(double *base, vuint8mf4_t bindex, vfloat64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf8x4(int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg4ei8_v_i8mf8x4(int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf4x4(int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg4ei8_v_i8mf4x4(int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf2x4(int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, 
size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg4ei8_v_i8mf2x4(int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8m1x4(int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg4ei8_v_i8m1x4(int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8m2x4(int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg4ei8_v_i8m2x4(int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16mf4x4(int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg4ei8_v_i16mf4x4(int16_t *base, vuint8mf8_t bindex, vint16mf4x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16mf2x4(int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg4ei8_v_i16mf2x4(int16_t *base, vuint8mf4_t bindex, vint16mf2x4_ // CHECK-RV64-LABEL: 
define dso_local void @test_vsuxseg4ei8_v_i16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16m1x4(int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg4ei8_v_i16m1x4(int16_t *base, vuint8mf2_t bindex, vint16m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16m2x4(int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg4ei8_v_i16m2x4(int16_t *base, vuint8m1_t bindex, vint16m2x4_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i32mf2x4(int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg4ei8_v_i32mf2x4(int32_t *base, vuint8mf8_t bindex, vint32mf2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i32m1x4(int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg4ei8_v_i32m1x4(int32_t *base, vuint8mf4_t bindex, vint32m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i32m2x4(int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg4ei8_v_i32m2x4(int32_t *base, vuint8mf2_t bindex, vint32m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i64m1x4(int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg4ei8_v_i64m1x4(int64_t *base, vuint8mf8_t bindex, vint64m1x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i64m2x4(int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg4ei8_v_i64m2x4(int64_t *base, vuint8mf4_t bindex, vint64m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf8x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8mf8x4(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg4ei8_v_u8mf8x4(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8mf4x4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg4ei8_v_u8mf4x4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8mf2x4(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg4ei8_v_u8mf2x4(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8m1x4(uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg4ei8_v_u8m1x4(uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8m2x4(uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg4ei8_v_u8m2x4(uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf4x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16mf4x4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg4ei8_v_u16mf4x4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16mf2x4(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg4ei8_v_u16mf2x4(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16m1x4(uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg4ei8_v_u16m1x4(uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16m2x4(uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg4ei8_v_u16m2x4(uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32mf2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u32mf2x4(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg4ei8_v_u32mf2x4(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u32m1x4(uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg4ei8_v_u32m1x4(uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u32m2x4(uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg4ei8_v_u32m2x4(uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m1x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u64m1x4(uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg4ei8_v_u64m1x4(uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m2x4 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u64m2x4(uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg4ei8_v_u64m2x4(uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg4ei8_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg4ei8_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg4ei8_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 
0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg4ei8_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32mf2x4_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg4ei8_v_f32mf2x4_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32m1x4_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg4ei8_v_f32m1x4_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32m2x4_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg4ei8_v_f32m2x4_m(vbool16_t mask, float *base, vuint8mf2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f64m1x4_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg4ei8_v_f64m1x4_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f64m2x4_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg4ei8_v_f64m2x4_m(vbool32_t mask, double *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg4ei8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg4ei8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf2x4_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg4ei8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg4ei8_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg4ei8_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) { @@ 
-530,7 +530,7 @@ void test_vsuxseg4ei8_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) { @@ -540,7 +540,7 @@ void test_vsuxseg4ei8_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) { @@ -550,7 +550,7 @@ void test_vsuxseg4ei8_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) { @@ -560,7 +560,7 @@ void test_vsuxseg4ei8_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint8m1_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) { @@ -570,7 +570,7 @@ void test_vsuxseg4ei8_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) { @@ -580,7 +580,7 @@ void test_vsuxseg4ei8_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) { @@ -590,7 +590,7 @@ void test_vsuxseg4ei8_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) { @@ -600,7 +600,7 @@ void test_vsuxseg4ei8_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], 
i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) { @@ -610,7 +610,7 @@ void test_vsuxseg4ei8_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf8x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { @@ -620,7 +620,7 @@ void test_vsuxseg4ei8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { @@ -630,7 +630,7 @@ void test_vsuxseg4ei8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { @@ -640,7 +640,7 @@ void test_vsuxseg4ei8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) { @@ -650,7 +650,7 @@ void test_vsuxseg4ei8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) { @@ -660,7 +660,7 @@ void test_vsuxseg4ei8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf4x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { @@ -670,7 +670,7 @@ void test_vsuxseg4ei8_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { @@ -680,7 +680,7 @@ void test_vsuxseg4ei8_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m1x4_m // 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { @@ -690,7 +690,7 @@ void test_vsuxseg4ei8_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) { @@ -700,7 +700,7 @@ void test_vsuxseg4ei8_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint8m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32mf2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { @@ -710,7 +710,7 @@ void test_vsuxseg4ei8_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, 
size_t vl) { @@ -720,7 +720,7 @@ void test_vsuxseg4ei8_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { @@ -730,7 +730,7 @@ void test_vsuxseg4ei8_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m1x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) { @@ -740,7 +740,7 @@ void test_vsuxseg4ei8_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m2x4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 4) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei16.c index ef11cf64bd53b..5c7395f1a2a09 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f16mf4x5(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg5ei16_v_f16mf4x5(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f16mf2x5(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg5ei16_v_f16mf2x5(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f16m1x5(_Float16 *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg5ei16_v_f16m1x5(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f32mf2x5(float *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg5ei16_v_f32mf2x5(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f32m1x5(float *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg5ei16_v_f32m1x5(float *base, vuint16mf2_t bindex, vfloat32m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f64m1x5(double *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg5ei16_v_f64m1x5(double *base, vuint16mf4_t bindex, vfloat64m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf8x5(int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg5ei16_v_i8mf8x5(int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf4x5(int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg5ei16_v_i8mf4x5(int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf2x5(int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg5ei16_v_i8mf2x5(int8_t *base, vuint16m1_t bindex, vint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8m1x5(int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg5ei16_v_i8m1x5(int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16mf4x5(int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg5ei16_v_i16mf4x5(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16mf2x5(int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg5ei16_v_i16mf2x5(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16m1x5(int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg5ei16_v_i16m1x5(int16_t *base, vuint16m1_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i32mf2x5(int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg5ei16_v_i32mf2x5(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i32m1x5(int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg5ei16_v_i32m1x5(int32_t *base, vuint16mf2_t bindex, vint32m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i64m1x5(int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg5ei16_v_i64m1x5(int64_t *base, vuint16mf4_t bindex, vint64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf8x5(uint8_t 
*base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg5ei16_v_u8mf8x5(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf4x5(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg5ei16_v_u8mf4x5(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf2x5(uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg5ei16_v_u8mf2x5(uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8m1x5(uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg5ei16_v_u8m1x5(uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16mf4x5(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void 
test_vsuxseg5ei16_v_u16mf4x5(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16mf2x5(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg5ei16_v_u16mf2x5(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16m1x5(uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg5ei16_v_u16m1x5(uint16_t *base, vuint16m1_t bindex, vuint16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u32mf2x5(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg5ei16_v_u32mf2x5(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u32m1x5(uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg5ei16_v_u32m1x5(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg5ei16_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u64m1x5(uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg5ei16_v_u64m1x5(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg5ei16_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg5ei16_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t 
vl) { @@ -300,7 +300,7 @@ void test_vsuxseg5ei16_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f32mf2x5_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg5ei16_v_f32mf2x5_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f32m1x5_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg5ei16_v_f32m1x5_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f64m1x5_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg5ei16_v_f64m1x5_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg5ei16_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg5ei16_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg5ei16_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg5ei16_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg5ei16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg5ei16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg5ei16_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg5ei16_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg5ei16_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg5ei16_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg5ei16_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg5ei16_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg5ei16_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg5ei16_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg5ei16_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg5ei16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16mf2x5_m(vbool32_t mask, 
uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg5ei16_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg5ei16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg5ei16_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg5ei16_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei32.c index cac07a1aa1ca7..00e4716560138 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16mf4x5(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg5ei32_v_f16mf4x5(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16mf2x5(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg5ei32_v_f16mf2x5(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16m1x5(_Float16 *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg5ei32_v_f16m1x5(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) 
[[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f32mf2x5(float *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg5ei32_v_f32mf2x5(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f32m1x5(float *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg5ei32_v_f32m1x5(float *base, vuint32m1_t bindex, vfloat32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f64m1x5(double *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg5ei32_v_f64m1x5(double *base, vuint32mf2_t bindex, vfloat64m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf8x5(int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg5ei32_v_i8mf8x5(int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf4x5(int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg5ei32_v_i8mf4x5(int8_t *base, vuint32m1_t bindex, vint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf2x5(int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg5ei32_v_i8mf2x5(int8_t *base, vuint32m2_t bindex, vint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8m1x5(int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg5ei32_v_i8m1x5(int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16mf4x5(int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg5ei32_v_i16mf4x5(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16mf2x5(int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg5ei32_v_i16mf2x5(int16_t *base, vuint32m1_t bindex, vint16mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16m1x5(int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg5ei32_v_i16m1x5(int16_t *base, vuint32m2_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i32mf2x5(int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg5ei32_v_i32mf2x5(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i32m1x5(int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg5ei32_v_i32m1x5(int32_t *base, vuint32m1_t bindex, vint32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i64m1x5(int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg5ei32_v_i64m1x5(int64_t *base, vuint32mf2_t bindex, vint64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8mf8x5(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg5ei32_v_u8mf8x5(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8mf4x5(uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg5ei32_v_u8mf4x5(uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8mf2x5(uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg5ei32_v_u8mf2x5(uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8m1x5(uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg5ei32_v_u8m1x5(uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u16mf4x5(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg5ei32_v_u16mf4x5(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u16mf2x5(uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg5ei32_v_u16mf2x5(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u16m1x5(uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg5ei32_v_u16m1x5(uint16_t *base, vuint32m2_t bindex, vuint16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg5ei32_v_u32mf2x5(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg5ei32_v_u32mf2x5(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u32m1x5(uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg5ei32_v_u32m1x5(uint32_t *base, vuint32m1_t bindex, vuint32m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u64m1x5(uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg5ei32_v_u64m1x5(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg5ei32_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) 
// CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg5ei32_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg5ei32_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f32mf2x5_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg5ei32_v_f32mf2x5_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f32m1x5_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg5ei32_v_f32m1x5_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f64m1x5_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg5ei32_v_f64m1x5_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg5ei32_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg5ei32_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg5ei32_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg5ei32_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg5ei32_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg5ei32_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg5ei32_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32mf2x5_m 
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg5ei32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg5ei32_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg5ei32_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, 
vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg5ei32_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg5ei32_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg5ei32_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg5ei32_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg5ei32_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg5ei32_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg5ei32_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg5ei32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg5ei32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei64.c index 55a6103d0a8e1..d8b909d7f1bfd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16mf4x5(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg5ei64_v_f16mf4x5(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16mf2x5(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, 
size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg5ei64_v_f16mf2x5(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16m1x5(_Float16 *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg5ei64_v_f16m1x5(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f32mf2x5(float *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg5ei64_v_f32mf2x5(float *base, vuint64m1_t bindex, vfloat32mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f32m1x5(float *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg5ei64_v_f32m1x5(float *base, vuint64m2_t bindex, vfloat32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f64m1x5(double *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg5ei64_v_f64m1x5(double *base, vuint64m1_t bindex, vfloat64m1x5_ // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8mf8x5(int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg5ei64_v_i8mf8x5(int8_t *base, vuint64m1_t bindex, vint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8mf4x5(int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg5ei64_v_i8mf4x5(int8_t *base, vuint64m2_t bindex, vint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8mf2x5(int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg5ei64_v_i8mf2x5(int8_t *base, vuint64m4_t bindex, vint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8m1x5(int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg5ei64_v_i8m1x5(int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i16mf4x5(int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg5ei64_v_i16mf4x5(int16_t *base, vuint64m1_t bindex, vint16mf4x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i16mf2x5(int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg5ei64_v_i16mf2x5(int16_t *base, vuint64m2_t bindex, vint16mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i16m1x5(int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg5ei64_v_i16m1x5(int16_t *base, vuint64m4_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i32mf2x5(int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg5ei64_v_i32mf2x5(int32_t *base, vuint64m1_t bindex, vint32mf2x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i32m1x5(int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg5ei64_v_i32m1x5(int32_t *base, vuint64m2_t bindex, vint32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i64m1x5(int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg5ei64_v_i64m1x5(int64_t *base, vuint64m1_t bindex, vint64m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf8x5(uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg5ei64_v_u8mf8x5(uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf4x5(uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg5ei64_v_u8mf4x5(uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf2x5(uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg5ei64_v_u8mf2x5(uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8m1x5(uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg5ei64_v_u8m1x5(uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16mf4x5(uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg5ei64_v_u16mf4x5(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16mf2x5(uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg5ei64_v_u16mf2x5(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16m1x5(uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg5ei64_v_u16m1x5(uint16_t *base, vuint64m4_t bindex, vuint16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u32mf2x5(uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg5ei64_v_u32mf2x5(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u32m1x5(uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg5ei64_v_u32m1x5(uint32_t *base, vuint64m2_t bindex, vuint32m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u64m1x5(uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg5ei64_v_u64m1x5(uint64_t *base, vuint64m1_t bindex, vuint64m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg5ei64_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg5ei64_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg5ei64_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f32mf2x5_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg5ei64_v_f32mf2x5_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f32m1x5_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg5ei64_v_f32m1x5_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f64m1x5_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg5ei64_v_f64m1x5_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg5ei64_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg5ei64_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf2x5_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg5ei64_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg5ei64_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg5ei64_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ 
-390,7 +390,7 @@ void test_vsuxseg5ei64_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg5ei64_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg5ei64_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg5ei64_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg5ei64_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg5ei64_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg5ei64_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg5ei64_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg5ei64_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg5ei64_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg5ei64_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg5ei64_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg5ei64_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg5ei64_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei8.c index 4090505cddf5f..459888d28e212 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f16mf4x5(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg5ei8_v_f16mf4x5(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f16mf2x5(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg5ei8_v_f16mf2x5(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f16m1x5(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg5ei8_v_f16m1x5(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x5 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f32mf2x5(float *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg5ei8_v_f32mf2x5(float *base, vuint8mf8_t bindex, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f32m1x5(float *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) 
{ @@ -60,7 +60,7 @@ void test_vsuxseg5ei8_v_f32m1x5(float *base, vuint8mf4_t bindex, vfloat32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f64m1x5(double *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg5ei8_v_f64m1x5(double *base, vuint8mf8_t bindex, vfloat64m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf8x5(int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg5ei8_v_i8mf8x5(int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf4x5(int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg5ei8_v_i8mf4x5(int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf2x5(int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg5ei8_v_i8mf2x5(int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg5ei8_v_i8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8m1x5(int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg5ei8_v_i8m1x5(int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16mf4x5(int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg5ei8_v_i16mf4x5(int16_t *base, vuint8mf8_t bindex, vint16mf4x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16mf2x5(int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg5ei8_v_i16mf2x5(int16_t *base, vuint8mf4_t bindex, vint16mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16m1x5(int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg5ei8_v_i16m1x5(int16_t *base, vuint8mf2_t bindex, vint16m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) 
[[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i32mf2x5(int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg5ei8_v_i32mf2x5(int32_t *base, vuint8mf8_t bindex, vint32mf2x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i32m1x5(int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg5ei8_v_i32m1x5(int32_t *base, vuint8mf4_t bindex, vint32m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i64m1x5(int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg5ei8_v_i64m1x5(int64_t *base, vuint8mf8_t bindex, vint64m1x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf8x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf8x5(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg5ei8_v_u8mf8x5(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf4x5(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg5ei8_v_u8mf4x5(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf2x5(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg5ei8_v_u8mf2x5(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8m1x5(uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg5ei8_v_u8m1x5(uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf4x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u16mf4x5(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg5ei8_v_u16mf4x5(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u16mf2x5(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg5ei8_v_u16mf2x5(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u16m1x5(uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg5ei8_v_u16m1x5(uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32mf2x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u32mf2x5(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg5ei8_v_u32mf2x5(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u32m1x5(uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg5ei8_v_u32m1x5(uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u64m1x5 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u64m1x5(uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg5ei8_v_u64m1x5(uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg5ei8_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg5ei8_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg5ei8_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f32mf2x5_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg5ei8_v_f32mf2x5_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f32m1x5_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg5ei8_v_f32m1x5_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f64m1x5_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg5ei8_v_f64m1x5_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg5ei8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg5ei8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg5ei8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg5ei8_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void 
test_vsuxseg5ei8_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg5ei8_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg5ei8_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg5ei8_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], 
i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg5ei8_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg5ei8_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf8x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg5ei8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg5ei8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg5ei8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg5ei8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf4x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg5ei8_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg5ei8_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg5ei8_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32mf2x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg5ei8_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg5ei8_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u64m1x5_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 5) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei16.c index 
c25494c9f1e7a..3a332accd3361 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f16mf4x6(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg6ei16_v_f16mf4x6(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f16mf2x6(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg6ei16_v_f16mf2x6(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f16m1x6(_Float16 *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg6ei16_v_f16m1x6(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f32mf2x6(float *base, vuint16mf4_t 
bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg6ei16_v_f32mf2x6(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f32m1x6(float *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg6ei16_v_f32m1x6(float *base, vuint16mf2_t bindex, vfloat32m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f64m1x6(double *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg6ei16_v_f64m1x6(double *base, vuint16mf4_t bindex, vfloat64m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf8x6(int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg6ei16_v_i8mf8x6(int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf4x6(int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg6ei16_v_i8mf4x6(int8_t *base, vuint16mf2_t 
bindex, vint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf2x6(int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg6ei16_v_i8mf2x6(int8_t *base, vuint16m1_t bindex, vint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8m1x6(int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg6ei16_v_i8m1x6(int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16mf4x6(int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg6ei16_v_i16mf4x6(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16mf2x6(int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg6ei16_v_i16mf2x6(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16m1x6 // CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16m1x6(int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg6ei16_v_i16m1x6(int16_t *base, vuint16m1_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i32mf2x6(int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg6ei16_v_i32mf2x6(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i32m1x6(int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg6ei16_v_i32m1x6(int32_t *base, vuint16mf2_t bindex, vint32m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i64m1x6(int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg6ei16_v_i64m1x6(int64_t *base, vuint16mf4_t bindex, vint64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf8x6(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg6ei16_v_u8mf8x6(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf4x6(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg6ei16_v_u8mf4x6(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf2x6(uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg6ei16_v_u8mf2x6(uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8m1x6(uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg6ei16_v_u8m1x6(uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16mf4x6(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg6ei16_v_u16mf4x6(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16mf2x6(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg6ei16_v_u16mf2x6(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16m1x6(uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg6ei16_v_u16m1x6(uint16_t *base, vuint16m1_t bindex, vuint16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u32mf2x6(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg6ei16_v_u32mf2x6(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u32m1x6(uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg6ei16_v_u32m1x6(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u64m1x6(uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg6ei16_v_u64m1x6(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg6ei16_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg6ei16_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg6ei16_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f32mf2x6_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg6ei16_v_f32mf2x6_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f32m1x6_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg6ei16_v_f32m1x6_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f64m1x6_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg6ei16_v_f64m1x6_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf8x6_m // CHECK-RV64-SAME: 
( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg6ei16_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg6ei16_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg6ei16_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ 
-370,7 +370,7 @@ void test_vsuxseg6ei16_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg6ei16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg6ei16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg6ei16_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg6ei16_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg6ei16_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg6ei16_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg6ei16_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg6ei16_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg6ei16_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg6ei16_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg6ei16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg6ei16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg6ei16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg6ei16_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg6ei16_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei32.c index 12aa848ea6b3b..a47f9ba19eda4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16mf4x6(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg6ei32_v_f16mf4x6(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16mf2x6(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg6ei32_v_f16mf2x6(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16m1x6(_Float16 *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg6ei32_v_f16m1x6(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f32mf2x6(float *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg6ei32_v_f32mf2x6(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f32m1x6(float *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg6ei32_v_f32m1x6(float *base, vuint32m1_t bindex, vfloat32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f64m1x6(double *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg6ei32_v_f64m1x6(double *base, vuint32mf2_t bindex, vfloat64m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8mf8x6(int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg6ei32_v_i8mf8x6(int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8mf4x6(int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg6ei32_v_i8mf4x6(int8_t *base, vuint32m1_t bindex, vint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8mf2x6(int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg6ei32_v_i8mf2x6(int8_t *base, vuint32m2_t bindex, vint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8m1x6(int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg6ei32_v_i8m1x6(int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16mf4x6(int16_t *base, 
vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg6ei32_v_i16mf4x6(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16mf2x6(int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg6ei32_v_i16mf2x6(int16_t *base, vuint32m1_t bindex, vint16mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16m1x6(int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg6ei32_v_i16m1x6(int16_t *base, vuint32m2_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i32mf2x6(int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg6ei32_v_i32mf2x6(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i32m1x6(int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void 
test_vsuxseg6ei32_v_i32m1x6(int32_t *base, vuint32m1_t bindex, vint32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i64m1x6(int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg6ei32_v_i64m1x6(int64_t *base, vuint32mf2_t bindex, vint64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf8x6(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg6ei32_v_u8mf8x6(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf4x6(uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg6ei32_v_u8mf4x6(uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf2x6(uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg6ei32_v_u8mf2x6(uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_ // CHECK-RV64-LABEL: define dso_local 
void @test_vsuxseg6ei32_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8m1x6(uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg6ei32_v_u8m1x6(uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16mf4x6(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg6ei32_v_u16mf4x6(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16mf2x6(uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg6ei32_v_u16mf2x6(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16m1x6(uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg6ei32_v_u16m1x6(uint16_t *base, vuint32m2_t bindex, vuint16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u32mf2x6(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg6ei32_v_u32mf2x6(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u32m1x6(uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg6ei32_v_u32m1x6(uint32_t *base, vuint32m1_t bindex, vuint32m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u64m1x6(uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg6ei32_v_u64m1x6(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg6ei32_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg6ei32_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg6ei32_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f32mf2x6_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg6ei32_v_f32mf2x6_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f32m1x6_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void 
test_vsuxseg6ei32_v_f32m1x6_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f64m1x6_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg6ei32_v_f64m1x6_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg6ei32_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg6ei32_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg6ei32_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg6ei32_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg6ei32_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg6ei32_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], 
i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg6ei32_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg6ei32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg6ei32_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg6ei32_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg6ei32_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg6ei32_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg6ei32_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg6ei32_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf4x6_m 
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg6ei32_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg6ei32_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg6ei32_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, 
vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg6ei32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg6ei32_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei64.c index 10ef73cb3e9da..d94fac600dea8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f16mf4x6(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg6ei64_v_f16mf4x6(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f16mf2x6(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg6ei64_v_f16mf2x6(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f16m1x6(_Float16 *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg6ei64_v_f16m1x6(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f32mf2x6(float *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg6ei64_v_f32mf2x6(float *base, vuint64m1_t bindex, vfloat32mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f32m1x6(float *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg6ei64_v_f32m1x6(float *base, vuint64m2_t bindex, vfloat32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f64m1x6(double *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg6ei64_v_f64m1x6(double *base, vuint64m1_t bindex, vfloat64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf8x6(int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg6ei64_v_i8mf8x6(int8_t *base, vuint64m1_t bindex, vint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf4x6(int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg6ei64_v_i8mf4x6(int8_t *base, vuint64m2_t bindex, vint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf2x6(int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg6ei64_v_i8mf2x6(int8_t *base, vuint64m4_t bindex, vint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8m1x6(int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg6ei64_v_i8m1x6(int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16mf4x6(int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg6ei64_v_i16mf4x6(int16_t *base, vuint64m1_t bindex, vint16mf4x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16mf2x6(int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg6ei64_v_i16mf2x6(int16_t *base, vuint64m2_t bindex, vint16mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16m1x6(int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg6ei64_v_i16m1x6(int16_t *base, vuint64m4_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i32mf2x6(int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg6ei64_v_i32mf2x6(int32_t *base, vuint64m1_t bindex, vint32mf2x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i32m1x6(int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg6ei64_v_i32m1x6(int32_t *base, vuint64m2_t bindex, vint32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i64m1x6(int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg6ei64_v_i64m1x6(int64_t *base, vuint64m1_t bindex, vint64m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf8x6(uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg6ei64_v_u8mf8x6(uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf4x6(uint8_t 
*base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg6ei64_v_u8mf4x6(uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf2x6(uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg6ei64_v_u8mf2x6(uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8m1x6(uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg6ei64_v_u8m1x6(uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16mf4x6(uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg6ei64_v_u16mf4x6(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16mf2x6(uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void 
test_vsuxseg6ei64_v_u16mf2x6(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16m1x6(uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg6ei64_v_u16m1x6(uint16_t *base, vuint64m4_t bindex, vuint16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u32mf2x6(uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg6ei64_v_u32mf2x6(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u32m1x6(uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg6ei64_v_u32m1x6(uint32_t *base, vuint64m2_t bindex, vuint32m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u64m1x6(uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg6ei64_v_u64m1x6(uint64_t *base, vuint64m1_t bindex, vuint64m1x6 // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg6ei64_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg6ei64_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg6ei64_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg6ei64_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg6ei64_v_f32mf2x6_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg6ei64_v_f32mf2x6_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f32m1x6_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg6ei64_v_f32m1x6_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f64m1x6_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg6ei64_v_f64m1x6_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg6ei64_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg6ei64_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg6ei64_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg6ei64_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg6ei64_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg6ei64_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg6ei64_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg6ei64_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg6ei64_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i64m1x6_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg6ei64_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg6ei64_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg6ei64_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { 
@@ -460,7 +460,7 @@ void test_vsuxseg6ei64_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg6ei64_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg6ei64_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg6ei64_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg6ei64_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg6ei64_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg6ei64_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei8.c index 452a66e3183cd..e525ed065fc05 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf4x6 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16mf4x6(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg6ei8_v_f16mf4x6(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16mf2x6(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg6ei8_v_f16mf2x6(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16m1x6(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg6ei8_v_f16m1x6(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x6 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f32mf2x6(float *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg6ei8_v_f32mf2x6(float *base, vuint8mf8_t bindex, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f32m1x6(float *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg6ei8_v_f32m1x6(float *base, vuint8mf4_t bindex, vfloat32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f64m1x6(double *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg6ei8_v_f64m1x6(double *base, vuint8mf8_t bindex, vfloat64m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8mf8x6(int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg6ei8_v_i8mf8x6(int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8mf4x6(int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg6ei8_v_i8mf4x6(int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8mf2x6(int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg6ei8_v_i8mf2x6(int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8m1x6(int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg6ei8_v_i8m1x6(int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i16mf4x6(int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg6ei8_v_i16mf4x6(int16_t *base, vuint8mf8_t bindex, vint16mf4x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i16mf2x6(int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg6ei8_v_i16mf2x6(int16_t *base, vuint8mf4_t bindex, vint16mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i16m1x6(int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg6ei8_v_i16m1x6(int16_t *base, vuint8mf2_t bindex, vint16m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i32mf2x6(int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg6ei8_v_i32mf2x6(int32_t *base, vuint8mf8_t bindex, vint32mf2x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i32m1x6(int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg6ei8_v_i32m1x6(int32_t *base, vuint8mf4_t bindex, vint32m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i64m1x6(int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg6ei8_v_i64m1x6(int64_t *base, vuint8mf8_t bindex, vint64m1x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf8x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 
6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf8x6(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg6ei8_v_u8mf8x6(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf4x6(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg6ei8_v_u8mf4x6(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf2x6(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg6ei8_v_u8mf2x6(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8m1x6(uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg6ei8_v_u8m1x6(uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf4x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16mf4x6(uint16_t 
*base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg6ei8_v_u16mf4x6(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16mf2x6(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg6ei8_v_u16mf2x6(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16m1x6(uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg6ei8_v_u16m1x6(uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32mf2x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u32mf2x6(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg6ei8_v_u32mf2x6(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u32m1x6(uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg6ei8_v_u32m1x6(uint32_t 
*base, vuint8mf4_t bindex, vuint32m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u64m1x6 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u64m1x6(uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg6ei8_v_u64m1x6(uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg6ei8_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg6ei8_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16m1x6_m(vbool16_t mask, _Float16 *base, 
vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg6ei8_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f32mf2x6_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg6ei8_v_f32mf2x6_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f32m1x6_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg6ei8_v_f32m1x6_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f64m1x6_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg6ei8_v_f64m1x6_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg6ei8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg6ei8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg6ei8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg6ei8_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg6ei8_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg6ei8_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg6ei8_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg6ei8_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg6ei8_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg6ei8_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf8x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg6ei8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void 
test_vsuxseg6ei8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg6ei8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg6ei8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf4x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg6ei8_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 
4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg6ei8_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg6ei8_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32mf2x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg6ei8_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg6ei8_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u64m1x6_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei16.c index ca9cb0a8bc213..d4f6f78da6125 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f16mf4x7(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg7ei16_v_f16mf4x7(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f16mf2x7(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg7ei16_v_f16mf2x7(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f16m1x7(_Float16 *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg7ei16_v_f16m1x7(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f32mf2x7(float *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg7ei16_v_f32mf2x7(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f32m1x7(float *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg7ei16_v_f32m1x7(float *base, vuint16mf2_t bindex, vfloat32m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f64m1x7(double *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg7ei16_v_f64m1x7(double *base, vuint16mf4_t bindex, vfloat64m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf8x7(int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg7ei16_v_i8mf8x7(int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf4x7(int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg7ei16_v_i8mf4x7(int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf2x7(int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg7ei16_v_i8mf2x7(int8_t *base, vuint16m1_t bindex, vint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8m1x7(int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg7ei16_v_i8m1x7(int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i16mf4x7(int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg7ei16_v_i16mf4x7(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i16mf2x7(int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg7ei16_v_i16mf2x7(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i16m1x7(int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg7ei16_v_i16m1x7(int16_t *base, vuint16m1_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i32mf2x7(int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg7ei16_v_i32mf2x7(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i32m1x7(int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg7ei16_v_i32m1x7(int32_t *base, vuint16mf2_t bindex, vint32m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i64m1x7(int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg7ei16_v_i64m1x7(int64_t *base, vuint16mf4_t bindex, vint64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8mf8x7(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg7ei16_v_u8mf8x7(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8mf4x7(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg7ei16_v_u8mf4x7(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8mf2x7(uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg7ei16_v_u8mf2x7(uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8m1x7(uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg7ei16_v_u8m1x7(uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16mf4x7(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg7ei16_v_u16mf4x7(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16mf2x7(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg7ei16_v_u16mf2x7(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16m1x7(uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg7ei16_v_u16m1x7(uint16_t *base, vuint16m1_t bindex, vuint16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg7ei16_v_u32mf2x7(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg7ei16_v_u32mf2x7(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u32m1x7(uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg7ei16_v_u32m1x7(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u64m1x7(uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg7ei16_v_u64m1x7(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg7ei16_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) 
// CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg7ei16_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg7ei16_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f32mf2x7_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg7ei16_v_f32mf2x7_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f32m1x7_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg7ei16_v_f32m1x7_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f64m1x7_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg7ei16_v_f64m1x7_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg7ei16_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg7ei16_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg7ei16_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg7ei16_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg7ei16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg7ei16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg7ei16_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32mf2x7_m 
// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg7ei16_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg7ei16_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg7ei16_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, 
vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg7ei16_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg7ei16_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg7ei16_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg7ei16_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg7ei16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg7ei16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg7ei16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg7ei16_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg7ei16_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei32.c index 36ed09a6f4fb4..94a88711e94c6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16mf4x7(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg7ei32_v_f16mf4x7(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16mf2x7(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x7_t 
v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg7ei32_v_f16mf2x7(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16m1x7(_Float16 *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg7ei32_v_f16m1x7(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f32mf2x7(float *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg7ei32_v_f32mf2x7(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f32m1x7(float *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg7ei32_v_f32m1x7(float *base, vuint32m1_t bindex, vfloat32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f64m1x7(double *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg7ei32_v_f64m1x7(double *base, vuint32mf2_t bindex, vfloat64m1x7 
// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8mf8x7(int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg7ei32_v_i8mf8x7(int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8mf4x7(int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg7ei32_v_i8mf4x7(int8_t *base, vuint32m1_t bindex, vint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8mf2x7(int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg7ei32_v_i8mf2x7(int8_t *base, vuint32m2_t bindex, vint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8m1x7(int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg7ei32_v_i8m1x7(int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16mf4x7(int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg7ei32_v_i16mf4x7(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16mf2x7(int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg7ei32_v_i16mf2x7(int16_t *base, vuint32m1_t bindex, vint16mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16m1x7(int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg7ei32_v_i16m1x7(int16_t *base, vuint32m2_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i32mf2x7(int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg7ei32_v_i32mf2x7(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i32m1x7(int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg7ei32_v_i32m1x7(int32_t *base, vuint32m1_t bindex, vint32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i64m1x7(int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg7ei32_v_i64m1x7(int64_t *base, vuint32mf2_t bindex, vint64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf8x7(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg7ei32_v_u8mf8x7(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf4x7(uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg7ei32_v_u8mf4x7(uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf2x7(uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg7ei32_v_u8mf2x7(uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8m1x7(uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg7ei32_v_u8m1x7(uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16mf4x7(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg7ei32_v_u16mf4x7(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16mf2x7(uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg7ei32_v_u16mf2x7(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16m1x7(uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg7ei32_v_u16m1x7(uint16_t *base, vuint32m2_t bindex, vuint16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u32mf2x7(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg7ei32_v_u32mf2x7(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u32m1x7(uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg7ei32_v_u32m1x7(uint32_t *base, vuint32m1_t bindex, vuint32m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u64m1x7(uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg7ei32_v_u64m1x7(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg7ei32_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg7ei32_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg7ei32_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f32mf2x7_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg7ei32_v_f32mf2x7_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f32m1x7_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg7ei32_v_f32m1x7_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f64m1x7_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg7ei32_v_f64m1x7_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg7ei32_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg7ei32_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf2x7_m // CHECK-RV64-SAME: 
( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg7ei32_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg7ei32_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg7ei32_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) { 
@@ -390,7 +390,7 @@ void test_vsuxseg7ei32_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg7ei32_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg7ei32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg7ei32_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg7ei32_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg7ei32_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg7ei32_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg7ei32_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg7ei32_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg7ei32_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg7ei32_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg7ei32_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg7ei32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg7ei32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei64.c index 4c0d5582df462..b0c379840b020 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], 
i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16mf4x7(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg7ei64_v_f16mf4x7(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16mf2x7(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg7ei64_v_f16mf2x7(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16m1x7(_Float16 *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg7ei64_v_f16m1x7(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f32mf2x7(float *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg7ei64_v_f32mf2x7(float *base, vuint64m1_t bindex, vfloat32mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f32m1x7(float *base, vuint64m2_t 
bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg7ei64_v_f32m1x7(float *base, vuint64m2_t bindex, vfloat32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f64m1x7(double *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg7ei64_v_f64m1x7(double *base, vuint64m1_t bindex, vfloat64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf8x7(int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg7ei64_v_i8mf8x7(int8_t *base, vuint64m1_t bindex, vint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf4x7(int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg7ei64_v_i8mf4x7(int8_t *base, vuint64m2_t bindex, vint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf2x7(int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg7ei64_v_i8mf2x7(int8_t *base, vuint64m4_t bindex, 
vint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8m1x7(int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg7ei64_v_i8m1x7(int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i16mf4x7(int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg7ei64_v_i16mf4x7(int16_t *base, vuint64m1_t bindex, vint16mf4x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i16mf2x7(int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg7ei64_v_i16mf2x7(int16_t *base, vuint64m2_t bindex, vint16mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i16m1x7(int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg7ei64_v_i16m1x7(int16_t *base, vuint64m4_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i32mf2x7(int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg7ei64_v_i32mf2x7(int32_t *base, vuint64m1_t bindex, vint32mf2x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i32m1x7(int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg7ei64_v_i32m1x7(int32_t *base, vuint64m2_t bindex, vint32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i64m1x7(int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg7ei64_v_i64m1x7(int64_t *base, vuint64m1_t bindex, vint64m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf8x7(uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg7ei64_v_u8mf8x7(uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf4x7(uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg7ei64_v_u8mf4x7(uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf2x7(uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg7ei64_v_u8mf2x7(uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8m1x7(uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg7ei64_v_u8m1x7(uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16mf4x7(uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg7ei64_v_u16mf4x7(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16mf2x7(uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg7ei64_v_u16mf2x7(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16m1x7(uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg7ei64_v_u16m1x7(uint16_t *base, vuint64m4_t bindex, vuint16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u32mf2x7(uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg7ei64_v_u32mf2x7(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u32m1x7(uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg7ei64_v_u32m1x7(uint32_t *base, vuint64m2_t bindex, vuint32m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u64m1x7(uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg7ei64_v_u64m1x7(uint64_t *base, vuint64m1_t bindex, vuint64m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg7ei64_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg7ei64_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg7ei64_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f32mf2x7_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg7ei64_v_f32mf2x7_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f32m1x7_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg7ei64_v_f32m1x7_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f64m1x7_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg7ei64_v_f64m1x7_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg7ei64_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg7ei64_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg7ei64_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg7ei64_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg7ei64_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i16mf4x7_m(vbool64_t mask, int16_t *base, 
vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg7ei64_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg7ei64_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg7ei64_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg7ei64_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg7ei64_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg7ei64_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg7ei64_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg7ei64_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg7ei64_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg7ei64_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg7ei64_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg7ei64_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16m1x7_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg7ei64_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg7ei64_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg7ei64_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t 
vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei8.c index 16f6f577a62a9..1492a10ed7c8c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16mf4x7(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg7ei8_v_f16mf4x7(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16mf2x7(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg7ei8_v_f16mf2x7(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16m1x7(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg7ei8_v_f16m1x7(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x7 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f32mf2x7(float *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg7ei8_v_f32mf2x7(float *base, vuint8mf8_t bindex, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f32m1x7(float *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg7ei8_v_f32m1x7(float *base, vuint8mf4_t bindex, vfloat32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f64m1x7(double *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg7ei8_v_f64m1x7(double *base, vuint8mf8_t bindex, vfloat64m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf8x7(int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg7ei8_v_i8mf8x7(int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf4x7(int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg7ei8_v_i8mf4x7(int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf2x7(int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg7ei8_v_i8mf2x7(int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8m1x7(int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg7ei8_v_i8m1x7(int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16mf4x7(int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg7ei8_v_i16mf4x7(int16_t *base, vuint8mf8_t bindex, vint16mf4x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16mf2x7(int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, 
size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg7ei8_v_i16mf2x7(int16_t *base, vuint8mf4_t bindex, vint16mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16m1x7(int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg7ei8_v_i16m1x7(int16_t *base, vuint8mf2_t bindex, vint16m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i32mf2x7(int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg7ei8_v_i32mf2x7(int32_t *base, vuint8mf8_t bindex, vint32mf2x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i32m1x7(int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg7ei8_v_i32m1x7(int32_t *base, vuint8mf4_t bindex, vint32m1x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i64m1x7(int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg7ei8_v_i64m1x7(int64_t *base, vuint8mf8_t bindex, vint64m1x7_t // CHECK-RV64-LABEL: 
define dso_local void @test_vsuxseg7ei8_v_u8mf8x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8mf8x7(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg7ei8_v_u8mf8x7(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8mf4x7(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg7ei8_v_u8mf4x7(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8mf2x7(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg7ei8_v_u8mf2x7(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8m1x7(uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg7ei8_v_u8m1x7(uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf4x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16mf4x7(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg7ei8_v_u16mf4x7(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16mf2x7(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg7ei8_v_u16mf2x7(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16m1x7(uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg7ei8_v_u16m1x7(uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32mf2x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u32mf2x7(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg7ei8_v_u32mf2x7(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u32m1x7(uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg7ei8_v_u32m1x7(uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u64m1x7 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u64m1x7(uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg7ei8_v_u64m1x7(uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg7ei8_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg7ei8_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg7ei8_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f32mf2x7_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg7ei8_v_f32mf2x7_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f32m1x7_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg7ei8_v_f32m1x7_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f64m1x7_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg7ei8_v_f64m1x7_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg7ei8_v_i8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg7ei8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg7ei8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg7ei8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint8m1_t 
bindex, vint8m1x7_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg7ei8_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg7ei8_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg7ei8_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg7ei8_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg7ei8_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg7ei8_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg7ei8_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf8x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg7ei8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg7ei8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg7ei8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg7ei8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf4x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg7ei8_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg7ei8_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg7ei8_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32mf2x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg7ei8_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ 
void test_vsuxseg7ei8_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u64m1x7_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 7) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei16.c index a2026d9daf021..c3d66c2b3a0a0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16mf4x8(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg8ei16_v_f16mf4x8(_Float16 *base, vuint16mf4_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16mf2x8(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg8ei16_v_f16mf2x8(_Float16 *base, vuint16mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16m1x8(_Float16 *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg8ei16_v_f16m1x8(_Float16 *base, vuint16m1_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f32mf2x8(float *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg8ei16_v_f32mf2x8(float *base, vuint16mf4_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f32m1x8(float *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg8ei16_v_f32m1x8(float *base, vuint16mf2_t bindex, vfloat32m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f64m1x8(double *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg8ei16_v_f64m1x8(double *base, vuint16mf4_t bindex, vfloat64m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 
8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf8x8(int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg8ei16_v_i8mf8x8(int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf4x8(int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg8ei16_v_i8mf4x8(int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf2x8(int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg8ei16_v_i8mf2x8(int8_t *base, vuint16m1_t bindex, vint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8m1x8(int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg8ei16_v_i8m1x8(int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg8ei16_v_i16mf4x8(int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg8ei16_v_i16mf4x8(int16_t *base, vuint16mf4_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i16mf2x8(int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg8ei16_v_i16mf2x8(int16_t *base, vuint16mf2_t bindex, vint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i16m1x8(int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg8ei16_v_i16m1x8(int16_t *base, vuint16m1_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i32mf2x8(int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg8ei16_v_i32mf2x8(int32_t *base, vuint16mf4_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i32m1x8(int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 
+160,7 @@ void test_vsuxseg8ei16_v_i32m1x8(int32_t *base, vuint16mf2_t bindex, vint32m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i64m1x8(int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg8ei16_v_i64m1x8(int64_t *base, vuint16mf4_t bindex, vint64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf8x8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg8ei16_v_u8mf8x8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf4x8(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg8ei16_v_u8mf4x8(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf2x8(uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg8ei16_v_u8mf2x8(uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_ // CHECK-RV64-LABEL: 
define dso_local void @test_vsuxseg8ei16_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8m1x8(uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg8ei16_v_u8m1x8(uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16mf4x8(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg8ei16_v_u16mf4x8(uint16_t *base, vuint16mf4_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16mf2x8(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg8ei16_v_u16mf2x8(uint16_t *base, vuint16mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16m1x8(uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg8ei16_v_u16m1x8(uint16_t *base, vuint16m1_t bindex, vuint16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u32mf2x8(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg8ei16_v_u32mf2x8(uint32_t *base, vuint16mf4_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u32m1x8(uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg8ei16_v_u32m1x8(uint32_t *base, vuint16mf2_t bindex, vuint32m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u64m1x8(uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg8ei16_v_u64m1x8(uint64_t *base, vuint16mf4_t bindex, vuint64m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg8ei16_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg8ei16_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg8ei16_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f32mf2x8_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg8ei16_v_f32mf2x8_m(vbool64_t mask, float *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f32m1x8_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void 
test_vsuxseg8ei16_v_f32m1x8_m(vbool32_t mask, float *base, vuint16mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f64m1x8_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg8ei16_v_f64m1x8_m(vbool64_t mask, double *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg8ei16_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg8ei16_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint16mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg8ei16_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint16m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg8ei16_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint16m2_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg8ei16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg8ei16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], 
i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg8ei16_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg8ei16_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg8ei16_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg8ei16_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg8ei16_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg8ei16_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint16mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg8ei16_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint16m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg8ei16_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint16m2_t bind // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg8ei16_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg8ei16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg8ei16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg8ei16_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint16m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u32mf2x8_m(vbool64_t mask, 
uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg8ei16_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint16mf4_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg8ei16_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint16mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei32.c index 4eba496ef338e..88bcc546b1f2a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16mf4x8(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg8ei32_v_f16mf4x8(_Float16 *base, vuint32mf2_t bindex, vfloat16m // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16mf2x8(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg8ei32_v_f16mf2x8(_Float16 *base, vuint32m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16m1x8(_Float16 *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg8ei32_v_f16m1x8(_Float16 *base, vuint32m2_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f32mf2x8(float *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg8ei32_v_f32mf2x8(float *base, vuint32mf2_t bindex, vfloat32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f32m1x8(float *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg8ei32_v_f32m1x8(float *base, vuint32m1_t bindex, vfloat32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f64m1x8(double *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg8ei32_v_f64m1x8(double *base, vuint32mf2_t bindex, vfloat64m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf8x8(int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg8ei32_v_i8mf8x8(int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf4x8(int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg8ei32_v_i8mf4x8(int8_t *base, vuint32m1_t bindex, vint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf2x8(int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg8ei32_v_i8mf2x8(int8_t *base, vuint32m2_t bindex, vint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8m1x8(int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg8ei32_v_i8m1x8(int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16mf4x8(int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg8ei32_v_i16mf4x8(int16_t *base, vuint32mf2_t bindex, vint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16mf2x8(int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg8ei32_v_i16mf2x8(int16_t *base, vuint32m1_t bindex, vint16mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16m1x8(int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg8ei32_v_i16m1x8(int16_t *base, vuint32m2_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i32mf2x8(int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg8ei32_v_i32mf2x8(int32_t *base, vuint32mf2_t bindex, vint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i32m1x8(int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg8ei32_v_i32m1x8(int32_t *base, vuint32m1_t bindex, vint32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i64m1x8(int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg8ei32_v_i64m1x8(int64_t *base, vuint32mf2_t bindex, vint64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf8x8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg8ei32_v_u8mf8x8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf4x8(uint8_t 
*base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg8ei32_v_u8mf4x8(uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf2x8(uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg8ei32_v_u8mf2x8(uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8m1x8(uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg8ei32_v_u8m1x8(uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u16mf4x8(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg8ei32_v_u16mf4x8(uint16_t *base, vuint32mf2_t bindex, vuint16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u16mf2x8(uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void 
test_vsuxseg8ei32_v_u16mf2x8(uint16_t *base, vuint32m1_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u16m1x8(uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg8ei32_v_u16m1x8(uint16_t *base, vuint32m2_t bindex, vuint16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u32mf2x8(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg8ei32_v_u32mf2x8(uint32_t *base, vuint32mf2_t bindex, vuint32mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u32m1x8(uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg8ei32_v_u32m1x8(uint32_t *base, vuint32m1_t bindex, vuint32m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u64m1x8(uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg8ei32_v_u64m1x8(uint64_t *base, vuint32mf2_t bindex, vuint64m1x // CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg8ei32_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg8ei32_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg8ei32_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg8ei32_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg8ei32_v_f32mf2x8_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg8ei32_v_f32mf2x8_m(vbool64_t mask, float *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f32m1x8_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg8ei32_v_f32m1x8_m(vbool32_t mask, float *base, vuint32m1_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f64m1x8_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg8ei32_v_f64m1x8_m(vbool64_t mask, double *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg8ei32_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg8ei32_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint32m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg8ei32_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint32m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg8ei32_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint32m4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg8ei32_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg8ei32_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg8ei32_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg8ei32_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg8ei32_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i64m1x8_m // CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg8ei32_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg8ei32_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg8ei32_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint32m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) 
{ @@ -460,7 +460,7 @@ void test_vsuxseg8ei32_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint32m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg8ei32_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint32m4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg8ei32_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg8ei32_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint32m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg8ei32_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint32m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg8ei32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint32mf2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg8ei32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint32m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei64.c index 9c1dfeaa0601f..a8994a0ba39ec 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf4x8 // 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f16mf4x8(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg8ei64_v_f16mf4x8(_Float16 *base, vuint64m1_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f16mf2x8(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg8ei64_v_f16mf2x8(_Float16 *base, vuint64m2_t bindex, vfloat16mf // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f16m1x8(_Float16 *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg8ei64_v_f16m1x8(_Float16 *base, vuint64m4_t bindex, vfloat16m1x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f32mf2x8(float *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg8ei64_v_f32mf2x8(float *base, vuint64m1_t bindex, vfloat32mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) 
[[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f32m1x8(float *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg8ei64_v_f32m1x8(float *base, vuint64m2_t bindex, vfloat32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f64m1x8(double *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg8ei64_v_f64m1x8(double *base, vuint64m1_t bindex, vfloat64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf8x8(int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg8ei64_v_i8mf8x8(int8_t *base, vuint64m1_t bindex, vint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf4x8(int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg8ei64_v_i8mf4x8(int8_t *base, vuint64m2_t bindex, vint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf2x8(int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg8ei64_v_i8mf2x8(int8_t *base, vuint64m4_t bindex, vint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8m1x8(int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg8ei64_v_i8m1x8(int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16mf4x8(int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg8ei64_v_i16mf4x8(int16_t *base, vuint64m1_t bindex, vint16mf4x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16mf2x8(int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg8ei64_v_i16mf2x8(int16_t *base, vuint64m2_t bindex, vint16mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16m1x8(int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg8ei64_v_i16m1x8(int16_t *base, vuint64m4_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i32mf2x8(int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg8ei64_v_i32mf2x8(int32_t *base, vuint64m1_t bindex, vint32mf2x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i32m1x8(int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg8ei64_v_i32m1x8(int32_t *base, vuint64m2_t bindex, vint32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i64m1x8(int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg8ei64_v_i64m1x8(int64_t *base, vuint64m1_t bindex, vint64m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8mf8x8(uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg8ei64_v_u8mf8x8(uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8mf4x8(uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg8ei64_v_u8mf4x8(uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8mf2x8(uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg8ei64_v_u8mf2x8(uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8m1x8(uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg8ei64_v_u8m1x8(uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16mf4x8(uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg8ei64_v_u16mf4x8(uint16_t *base, vuint64m1_t bindex, vuint16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16mf2x8(uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg8ei64_v_u16mf2x8(uint16_t *base, vuint64m2_t bindex, vuint16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16m1x8(uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg8ei64_v_u16m1x8(uint16_t *base, vuint64m4_t bindex, vuint16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u32mf2x8(uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg8ei64_v_u32mf2x8(uint32_t *base, vuint64m1_t bindex, vuint32mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg8ei64_v_u32m1x8(uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg8ei64_v_u32m1x8(uint32_t *base, vuint64m2_t bindex, vuint32m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u64m1x8(uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg8ei64_v_u64m1x8(uint64_t *base, vuint64m1_t bindex, vuint64m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg8ei64_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg8ei64_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", 
, 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg8ei64_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f32mf2x8_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg8ei64_v_f32mf2x8_m(vbool64_t mask, float *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f32m1x8_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg8ei64_v_f32m1x8_m(vbool32_t mask, float *base, vuint64m2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f64m1x8_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg8ei64_v_f64m1x8_m(vbool64_t mask, double *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg8ei64_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint64m1_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg8ei64_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint64m2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg8ei64_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint64m4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg8ei64_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint64m8_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg8ei64_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint64m1_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg8ei64_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg8ei64_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg8ei64_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint64m1_t b // 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg8ei64_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg8ei64_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg8ei64_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg8ei64_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg8ei64_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint64m2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg8ei64_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint64m4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg8ei64_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint64m8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg8ei64_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg8ei64_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint64m2_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg8ei64_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint64m4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg8ei64_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint64m1_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg8ei64_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint64m2_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei8.c index e4ea69a3691e0..0d3995dbb4b94 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f16mf4x8(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -20,7 +20,7 @@ void test_vsuxseg8ei8_v_f16mf4x8(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f16mf2x8(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -30,7 +30,7 @@ void test_vsuxseg8ei8_v_f16mf2x8(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f16m1x8(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -40,7 +40,7 @@ void test_vsuxseg8ei8_v_f16m1x8(_Float16 *base, vuint8mf2_t bindex, 
vfloat16m1x8 // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f32mf2x8(float *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -50,7 +50,7 @@ void test_vsuxseg8ei8_v_f32mf2x8(float *base, vuint8mf8_t bindex, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f32m1x8(float *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -60,7 +60,7 @@ void test_vsuxseg8ei8_v_f32m1x8(float *base, vuint8mf4_t bindex, vfloat32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f64m1x8(double *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -70,7 +70,7 @@ void test_vsuxseg8ei8_v_f64m1x8(double *base, vuint8mf8_t bindex, vfloat64m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8mf8x8(int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -80,7 +80,7 @@ void test_vsuxseg8ei8_v_i8mf8x8(int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8mf4x8(int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -90,7 +90,7 @@ void test_vsuxseg8ei8_v_i8mf4x8(int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8mf2x8(int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -100,7 +100,7 @@ void test_vsuxseg8ei8_v_i8mf2x8(int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8m1x8(int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -110,7 +110,7 @@ void test_vsuxseg8ei8_v_i8m1x8(int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tu // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16mf4x8(int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -120,7 +120,7 @@ void test_vsuxseg8ei8_v_i16mf4x8(int16_t *base, vuint8mf8_t bindex, vint16mf4x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16mf2x8(int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -130,7 +130,7 @@ void test_vsuxseg8ei8_v_i16mf2x8(int16_t *base, vuint8mf4_t bindex, vint16mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16m1x8(int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -140,7 +140,7 @@ void test_vsuxseg8ei8_v_i16m1x8(int16_t *base, vuint8mf2_t bindex, vint16m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i32mf2x8(int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -150,7 +150,7 @@ void test_vsuxseg8ei8_v_i32mf2x8(int32_t *base, vuint8mf8_t bindex, vint32mf2x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i32m1x8(int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -160,7 +160,7 @@ void test_vsuxseg8ei8_v_i32m1x8(int32_t *base, vuint8mf4_t bindex, vint32m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i64m1x8(int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -170,7 +170,7 @@ void test_vsuxseg8ei8_v_i64m1x8(int64_t *base, vuint8mf8_t bindex, vint64m1x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf8x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf8x8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -180,7 +180,7 @@ void test_vsuxseg8ei8_v_u8mf8x8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf4x8(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -190,7 +190,7 @@ void test_vsuxseg8ei8_v_u8mf4x8(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf2x8(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -200,7 +200,7 @@ void test_vsuxseg8ei8_v_u8mf2x8(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8m1x8(uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -210,7 +210,7 @@ void test_vsuxseg8ei8_v_u8m1x8(uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf4x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16mf4x8(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -220,7 +220,7 @@ void test_vsuxseg8ei8_v_u16mf4x8(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16mf2x8(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -230,7 +230,7 @@ void test_vsuxseg8ei8_v_u16mf2x8(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16m1x8(uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -240,7 +240,7 @@ void test_vsuxseg8ei8_v_u16m1x8(uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32mf2x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u32mf2x8(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -250,7 +250,7 @@ void test_vsuxseg8ei8_v_u32mf2x8(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u32m1x8(uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -260,7 +260,7 @@ void test_vsuxseg8ei8_v_u32m1x8(uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u64m1x8 // CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u64m1x8(uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) { @@ -270,7 +270,7 @@ void test_vsuxseg8ei8_v_u64m1x8(uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_ // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { @@ -280,7 +280,7 @@ void test_vsuxseg8ei8_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { @@ -290,7 +290,7 @@ void test_vsuxseg8ei8_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { @@ -300,7 +300,7 @@ void test_vsuxseg8ei8_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f32mf2x8_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { @@ -310,7 +310,7 @@ void test_vsuxseg8ei8_v_f32mf2x8_m(vbool64_t mask, float *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f32m1x8_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { @@ -320,7 +320,7 @@ void test_vsuxseg8ei8_v_f32m1x8_m(vbool32_t mask, float *base, vuint8mf4_t binde // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f64m1x8_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { @@ -330,7 +330,7 @@ void test_vsuxseg8ei8_v_f64m1x8_m(vbool64_t mask, double *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) { @@ -340,7 +340,7 @@ void test_vsuxseg8ei8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) { @@ -350,7 +350,7 @@ void test_vsuxseg8ei8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint8mf4_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { @@ -360,7 +360,7 @@ void test_vsuxseg8ei8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint8mf2_t bind // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) { @@ -370,7 +370,7 @@ void test_vsuxseg8ei8_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) { @@ -380,7 +380,7 @@ void test_vsuxseg8ei8_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) { @@ -390,7 +390,7 @@ void test_vsuxseg8ei8_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) { @@ -400,7 +400,7 @@ void test_vsuxseg8ei8_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg8ei8_v_i32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) { @@ -410,7 +410,7 @@ void test_vsuxseg8ei8_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint8mf8_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) { @@ -420,7 +420,7 @@ void test_vsuxseg8ei8_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) { @@ -430,7 +430,7 @@ void test_vsuxseg8ei8_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf8x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t 
bindex, vuint8mf8x8_t v_tuple, size_t vl) { @@ -440,7 +440,7 @@ void test_vsuxseg8ei8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { @@ -450,7 +450,7 @@ void test_vsuxseg8ei8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { @@ -460,7 +460,7 @@ void test_vsuxseg8ei8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bin // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) { @@ -470,7 +470,7 @@ void test_vsuxseg8ei8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf4x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) 
[[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { @@ -480,7 +480,7 @@ void test_vsuxseg8ei8_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { @@ -490,7 +490,7 @@ void test_vsuxseg8ei8_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint8mf4_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { @@ -500,7 +500,7 @@ void test_vsuxseg8ei8_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32mf2x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { @@ -510,7 +510,7 @@ void test_vsuxseg8ei8_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint8mf8_t b // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) { @@ -520,7 +520,7 @@ void test_vsuxseg8ei8_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bi // CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u64m1x8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], target("riscv.vector.tuple", , 8) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[V_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg2ei16.c index cd0281a4f9367..7c8a70e83104f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg2ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf4x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -21,7 +21,7 @@ vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf2x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 
[[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -31,7 +31,7 @@ vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m1x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -41,7 +41,7 @@ vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m2x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -51,7 +51,7 @@ vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m4x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, const __bf16 *rs1, vuint16m4_t rs2, size_t vl) { @@ -61,7 +61,7 @@ vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf4x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tum(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -71,7 +71,7 @@ vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tum(vbool64_t vm, vbfloat16mf4x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf2x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tum(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -81,7 +81,7 @@ vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tum(vbool32_t vm, vbfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m1x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tum(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -91,7 +91,7 @@ vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tum(vbool16_t vm, vbfloat16m1x2_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m2x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -101,7 +101,7 @@ vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m4x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd, const __bf16 *rs1, vuint16m4_t rs2, size_t vl) { @@ -111,7 +111,7 @@ vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf4x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tumu(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -121,7 +121,7 @@ vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tumu(vbool64_t vm, vbfloat16mf4x2 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf2x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tumu(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -131,7 +131,7 @@ vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tumu(vbool32_t vm, vbfloat16mf2x2 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m1x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tumu(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -141,7 +141,7 @@ vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tumu(vbool16_t vm, vbfloat16m1x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m2x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tumu(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -151,7 +151,7 @@ vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tumu(vbool8_t vm, vbfloat16m2x2_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m4x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tumu(vbool4_t vm, vbfloat16m4x2_t vd, const __bf16 *rs1, vuint16m4_t rs2, size_t vl) { @@ -161,7 +161,7 @@ vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tumu(vbool4_t vm, vbfloat16m4x2_t v 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf4x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_mu(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -171,7 +171,7 @@ vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_mu(vbool64_t vm, vbfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf2x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_mu(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -181,7 +181,7 @@ vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_mu(vbool32_t vm, vbfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m1x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -191,7 +191,7 @@ vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m2x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -201,7 +201,7 @@ vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m4x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, const __bf16 *rs1, vuint16m4_t rs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg3ei16.c index b93d1a983159c..708a3dfe94fa2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg3ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf4x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -21,7 +21,7 @@ vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf2x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -31,7 +31,7 @@ vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m1x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -41,7 +41,7 @@ vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m2x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -51,7 +51,7 @@ vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf4x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tum(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -61,7 +61,7 @@ vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tum(vbool64_t vm, vbfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf2x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tum(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -71,7 +71,7 @@ vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tum(vbool32_t vm, vbfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m1x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tum(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -81,7 +81,7 @@ vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tum(vbool16_t vm, vbfloat16m1x3_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m2x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -91,7 +91,7 @@ vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf4x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tumu(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -101,7 +101,7 @@ vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tumu(vbool64_t vm, vbfloat16mf4x3 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf2x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tumu(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -111,7 +111,7 @@ vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tumu(vbool32_t vm, vbfloat16mf2x3 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m1x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tumu(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -121,7 +121,7 @@ vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tumu(vbool16_t vm, vbfloat16m1x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m2x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tumu(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -131,7 +131,7 @@ vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tumu(vbool8_t vm, vbfloat16m2x3_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf4x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_mu(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -141,7 +141,7 @@ vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_mu(vbool64_t vm, vbfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf2x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_mu(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -151,7 +151,7 @@ vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_mu(vbool32_t vm, vbfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m1x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -161,7 +161,7 @@ vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m2x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg4ei16.c index 6cdd4621644c3..af80f6a8497f7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg4ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf4x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -21,7 +21,7 @@ vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf2x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -31,7 +31,7 @@ vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m1x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -41,7 +41,7 @@ vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m2x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -51,7 +51,7 @@ vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf4x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tum(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -61,7 +61,7 @@ vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tum(vbool64_t vm, 
vbfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf2x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tum(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -71,7 +71,7 @@ vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tum(vbool32_t vm, vbfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m1x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tum(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -81,7 +81,7 @@ vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tum(vbool16_t vm, vbfloat16m1x4_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m2x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -91,7 +91,7 @@ vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf4x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tumu(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -101,7 +101,7 @@ vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tumu(vbool64_t vm, vbfloat16mf4x4 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf2x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tumu(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -111,7 +111,7 @@ vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tumu(vbool32_t vm, vbfloat16mf2x4 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m1x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tumu(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -121,7 +121,7 @@ vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tumu(vbool16_t vm, vbfloat16m1x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m2x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tumu(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -131,7 +131,7 @@ vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tumu(vbool8_t vm, vbfloat16m2x4_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf4x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_mu(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -141,7 +141,7 @@ vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_mu(vbool64_t vm, vbfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf2x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_mu(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -151,7 +151,7 @@ vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_mu(vbool32_t vm, vbfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m1x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -161,7 +161,7 @@ vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m2x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg5ei16.c index 527ff1d53054c..c373937a7a4c0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg5ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf4x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -21,7 +21,7 @@ vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf2x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // 
vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -31,7 +31,7 @@ vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16m1x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -41,7 +41,7 @@ vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf4x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tum(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -51,7 +51,7 @@ vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tum(vbool64_t vm, vbfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf2x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tum(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -61,7 +61,7 @@ vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tum(vbool32_t vm, vbfloat16mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16m1x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], 
target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tum(vbool16_t vm, vbfloat16m1x5_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -71,7 +71,7 @@ vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tum(vbool16_t vm, vbfloat16m1x5_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf4x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tumu(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -81,7 +81,7 @@ vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tumu(vbool64_t vm, vbfloat16mf4x5 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf2x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tumu(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -91,7 +91,7 @@ vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tumu(vbool32_t vm, vbfloat16mf2x5 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16m1x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tumu(vbool16_t vm, vbfloat16m1x5_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -101,7 +101,7 @@ vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tumu(vbool16_t vm, vbfloat16m1x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf4x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_mu(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -111,7 +111,7 @@ vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_mu(vbool64_t vm, vbfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf2x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_mu(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -121,7 +121,7 @@ vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_mu(vbool32_t vm, vbfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16m1x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_mu(vbool16_t vm, vbfloat16m1x5_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg6ei16.c index b762fc064f0bc..0bd15e5c202d6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg6ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf4x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -21,7 +21,7 @@ vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf2x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -31,7 +31,7 @@ vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16m1x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) 
[[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -41,7 +41,7 @@ vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf4x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tum(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -51,7 +51,7 @@ vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tum(vbool64_t vm, vbfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf2x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tum(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -61,7 +61,7 @@ vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tum(vbool32_t vm, vbfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16m1x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tum(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -71,7 +71,7 @@ vbfloat16m1x6_t 
test_vloxseg6ei16_v_bf16m1x6_tum(vbool16_t vm, vbfloat16m1x6_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf4x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tumu(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -81,7 +81,7 @@ vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tumu(vbool64_t vm, vbfloat16mf4x6 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf2x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tumu(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -91,7 +91,7 @@ vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tumu(vbool32_t vm, vbfloat16mf2x6 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16m1x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tumu(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -101,7 +101,7 @@ vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tumu(vbool16_t vm, vbfloat16m1x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf4x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], 
[[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_mu(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -111,7 +111,7 @@ vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_mu(vbool64_t vm, vbfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf2x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_mu(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -121,7 +121,7 @@ vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_mu(vbool32_t vm, vbfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16m1x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_mu(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg7ei16.c index 9bf2ca156f7b9..81547609fff38 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg7ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf4x7_tu( // 
CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -21,7 +21,7 @@ vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf2x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -31,7 +31,7 @@ vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16m1x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -41,7 +41,7 @@ vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf4x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tum(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -51,7 +51,7 @@ vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tum(vbool64_t vm, vbfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf2x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tum(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -61,7 +61,7 @@ vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tum(vbool32_t vm, vbfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16m1x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tum(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -71,7 +71,7 @@ vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tum(vbool16_t vm, vbfloat16m1x7_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf4x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t 
test_vloxseg7ei16_v_bf16mf4x7_tumu(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -81,7 +81,7 @@ vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tumu(vbool64_t vm, vbfloat16mf4x7 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf2x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tumu(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -91,7 +91,7 @@ vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tumu(vbool32_t vm, vbfloat16mf2x7 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16m1x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tumu(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -101,7 +101,7 @@ vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tumu(vbool16_t vm, vbfloat16m1x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf4x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_mu(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -111,7 +111,7 @@ vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_mu(vbool64_t vm, vbfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
7) @test_vloxseg7ei16_v_bf16mf2x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_mu(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -121,7 +121,7 @@ vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_mu(vbool32_t vm, vbfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16m1x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_mu(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg8ei16.c index 2b2f910b29694..e7b133f0a52fc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vloxseg8ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf4x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -21,7 +21,7 @@ vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, const __b // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf2x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -31,7 +31,7 @@ vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16m1x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -41,7 +41,7 @@ vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf4x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tum(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -51,7 +51,7 @@ vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tum(vbool64_t vm, vbfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf2x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], 
[[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tum(vbool32_t vm, vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -61,7 +61,7 @@ vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tum(vbool32_t vm, vbfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16m1x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tum(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -71,7 +71,7 @@ vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tum(vbool16_t vm, vbfloat16m1x8_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf4x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tumu(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -81,7 +81,7 @@ vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tumu(vbool64_t vm, vbfloat16mf4x8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf2x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 
4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tumu(vbool32_t vm, vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -91,7 +91,7 @@ vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tumu(vbool32_t vm, vbfloat16mf2x8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16m1x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tumu(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -101,7 +101,7 @@ vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tumu(vbool16_t vm, vbfloat16m1x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf4x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_mu(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -111,7 +111,7 @@ vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_mu(vbool64_t vm, vbfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf2x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_mu(vbool32_t vm, vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -121,7 +121,7 @@ vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_mu(vbool32_t 
vm, vbfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16m1x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_mu(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg2e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg2e16ff.c index 7e061c6ca7f46..e2d23127f2762 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg2e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg2e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf4x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -24,7 +24,7 @@ vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, const __bf // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf2x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], 
align 8 @@ -37,7 +37,7 @@ vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, const __bf // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m1x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -50,7 +50,7 @@ vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_tu(vbfloat16m1x2_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m2x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -63,7 +63,7 @@ vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_tu(vbfloat16m2x2_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m4x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -76,7 +76,7 @@ vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_tu(vbfloat16m4x2_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf4x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef 
[[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -89,7 +89,7 @@ vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_tum(vbool64_t vm, vbfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf2x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -102,7 +102,7 @@ vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_tum(vbool32_t vm, vbfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m1x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -115,7 +115,7 @@ vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_tum(vbool16_t vm, vbfloat16m1x2_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m2x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -128,7 +128,7 @@ vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m4x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -141,7 +141,7 @@ vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf4x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -154,7 +154,7 @@ vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_tumu(vbool64_t vm, vbfloat16mf4x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf2x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 
} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -167,7 +167,7 @@ vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_tumu(vbool32_t vm, vbfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m1x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -180,7 +180,7 @@ vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_tumu(vbool16_t vm, vbfloat16m1x2_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m2x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -193,7 +193,7 @@ vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_tumu(vbool8_t vm, vbfloat16m2x2_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m4x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -206,7 +206,7 @@ vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_tumu(vbool4_t vm, vbfloat16m4x2_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf4x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -219,7 +219,7 @@ vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_mu(vbool64_t vm, vbfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf2x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -232,7 +232,7 @@ vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_mu(vbool32_t vm, vbfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m1x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 
2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -245,7 +245,7 @@ vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m2x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -258,7 +258,7 @@ vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m4x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg3e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg3e16ff.c index 8dcc5a84cfd0e..837a524dfff61 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg3e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg3e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf4x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], 
ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -24,7 +24,7 @@ vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, const __bf // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf2x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -37,7 +37,7 @@ vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, const __bf // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m1x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -50,7 +50,7 @@ vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_tu(vbfloat16m1x3_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m2x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -63,7 +63,7 @@ vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_tu(vbfloat16m2x3_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf4x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -76,7 +76,7 @@ vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_tum(vbool64_t vm, vbfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf2x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -89,7 +89,7 @@ vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_tum(vbool32_t vm, vbfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m1x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -102,7 +102,7 @@ vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_tum(vbool16_t vm, vbfloat16m1x3_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m2x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -115,7 +115,7 @@ vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf4x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -128,7 +128,7 @@ vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_tumu(vbool64_t vm, vbfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf2x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -141,7 +141,7 @@ vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_tumu(vbool32_t vm, vbfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m1x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -154,7 +154,7 @@ vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_tumu(vbool16_t vm, vbfloat16m1x3_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m2x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -167,7 +167,7 @@ vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_tumu(vbool8_t vm, vbfloat16m2x3_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf4x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -180,7 +180,7 @@ vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_mu(vbool64_t vm, vbfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf2x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -193,7 +193,7 @@ vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_mu(vbool32_t vm, vbfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m1x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -206,7 +206,7 @@ vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m2x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) 
[[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg4e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg4e16ff.c index 6f916412ebcd1..950b32dba1931 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg4e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg4e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf4x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -24,7 +24,7 @@ vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, const __bf // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf2x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -37,7 +37,7 @@ vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, const __bf // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m1x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -50,7 +50,7 @@ vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_tu(vbfloat16m1x4_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m2x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -63,7 +63,7 @@ vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_tu(vbfloat16m2x4_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf4x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -76,7 +76,7 @@ vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_tum(vbool64_t vm, vbfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf2x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) 
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -89,7 +89,7 @@ vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_tum(vbool32_t vm, vbfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m1x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -102,7 +102,7 @@ vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_tum(vbool16_t vm, vbfloat16m1x4_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m2x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -115,7 +115,7 @@ vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf4x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } 
[[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -128,7 +128,7 @@ vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_tumu(vbool64_t vm, vbfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf2x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -141,7 +141,7 @@ vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_tumu(vbool32_t vm, vbfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m1x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -154,7 +154,7 @@ vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_tumu(vbool16_t vm, vbfloat16m1x4_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m2x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 
4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -167,7 +167,7 @@ vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_tumu(vbool8_t vm, vbfloat16m2x4_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf4x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -180,7 +180,7 @@ vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_mu(vbool64_t vm, vbfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf2x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -193,7 +193,7 @@ vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_mu(vbool32_t vm, vbfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m1x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -206,7 
+206,7 @@ vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m2x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg5e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg5e16ff.c index 3937a1b1dd136..aacccc54f9db4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg5e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg5e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf4x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -24,7 +24,7 @@ vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, const __bf // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf2x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -37,7 +37,7 @@ vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, const __bf // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16m1x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -50,7 +50,7 @@ vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_tu(vbfloat16m1x5_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf4x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -63,7 +63,7 @@ vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_tum(vbool64_t vm, vbfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf2x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -76,7 +76,7 @@ 
vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_tum(vbool32_t vm, vbfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16m1x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -89,7 +89,7 @@ vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_tum(vbool16_t vm, vbfloat16m1x5_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf4x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -102,7 +102,7 @@ vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_tumu(vbool64_t vm, vbfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf2x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -115,7 +115,7 @@ vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_tumu(vbool32_t vm, vbfloat16mf2x5_ // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16m1x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -128,7 +128,7 @@ vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_tumu(vbool16_t vm, vbfloat16m1x5_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf4x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -141,7 +141,7 @@ vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_mu(vbool64_t vm, vbfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf2x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -154,7 +154,7 @@ vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_mu(vbool32_t vm, vbfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vlseg5e16ff_v_bf16m1x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg6e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg6e16ff.c index e5aa575aca3b8..a8f113a109d81 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg6e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg6e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf4x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -24,7 +24,7 @@ vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, const __bf // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf2x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -37,7 +37,7 @@ 
vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, const __bf // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16m1x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -50,7 +50,7 @@ vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_tu(vbfloat16m1x6_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf4x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -63,7 +63,7 @@ vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_tum(vbool64_t vm, vbfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf2x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -76,7 +76,7 @@ vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_tum(vbool32_t vm, vbfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vlseg6e16ff_v_bf16m1x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -89,7 +89,7 @@ vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_tum(vbool16_t vm, vbfloat16m1x6_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf4x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -102,7 +102,7 @@ vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_tumu(vbool64_t vm, vbfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf2x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -115,7 +115,7 @@ vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_tumu(vbool32_t vm, vbfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16m1x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", 
, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -128,7 +128,7 @@ vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_tumu(vbool16_t vm, vbfloat16m1x6_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf4x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -141,7 +141,7 @@ vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_mu(vbool64_t vm, vbfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf2x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -154,7 +154,7 @@ vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_mu(vbool32_t vm, vbfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16m1x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg7e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg7e16ff.c index 9948deaa59d1b..9aac3180827a7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg7e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg7e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf4x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -24,7 +24,7 @@ vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, const __bf // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf2x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -37,7 +37,7 @@ vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, const __bf // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16m1x7_tu( 
// CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -50,7 +50,7 @@ vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_tu(vbfloat16m1x7_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf4x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -63,7 +63,7 @@ vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_tum(vbool64_t vm, vbfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf2x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -76,7 +76,7 @@ vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_tum(vbool32_t vm, vbfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16m1x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -89,7 +89,7 @@ vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_tum(vbool16_t vm, vbfloat16m1x7_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf4x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -102,7 +102,7 @@ vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_tumu(vbool64_t vm, vbfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf2x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -115,7 +115,7 @@ vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_tumu(vbool32_t vm, vbfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16m1x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -128,7 +128,7 @@ vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_tumu(vbool16_t vm, vbfloat16m1x7_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf4x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -141,7 +141,7 @@ vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_mu(vbool64_t vm, vbfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf2x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -154,7 +154,7 @@ vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_mu(vbool32_t vm, vbfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16m1x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } 
@llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg8e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg8e16ff.c index 18c0611ae8ed1..2aec5af87f37a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg8e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vlseg8e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf4x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -24,7 +24,7 @@ vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, const __bf // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf2x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -37,7 +37,7 @@ vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, const __bf // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16m1x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -50,7 +50,7 @@ vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_tu(vbfloat16m1x8_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf4x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -63,7 +63,7 @@ vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_tum(vbool64_t vm, vbfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf2x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -76,7 +76,7 @@ vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_tum(vbool32_t vm, vbfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16m1x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } 
@llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -89,7 +89,7 @@ vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_tum(vbool16_t vm, vbfloat16m1x8_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf4x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -102,7 +102,7 @@ vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_tumu(vbool64_t vm, vbfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf2x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -115,7 +115,7 @@ vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_tumu(vbool32_t vm, vbfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16m1x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", 
, 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -128,7 +128,7 @@ vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_tumu(vbool16_t vm, vbfloat16m1x8_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf4x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -141,7 +141,7 @@ vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_mu(vbool64_t vm, vbfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf2x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -154,7 +154,7 @@ vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_mu(vbool32_t vm, vbfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16m1x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg2ei16.c index 1e2de39ee6d1a..a1acdb3f15e7a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg2ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf4x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -21,7 +21,7 @@ vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf2x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -31,7 +31,7 @@ vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m1x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -41,7 +41,7 @@ vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m2x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -51,7 +51,7 @@ vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m4x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, const __bf16 *rs1, vuint16m4_t rs2, size_t vl) { @@ -61,7 +61,7 @@ vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf4x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tum(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -71,7 +71,7 @@ vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tum(vbool64_t vm, vbfloat16mf4x2_ // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf2x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tum(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -81,7 +81,7 @@ vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tum(vbool32_t vm, vbfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m1x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tum(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -91,7 +91,7 @@ vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tum(vbool16_t vm, vbfloat16m1x2_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m2x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -101,7 +101,7 @@ vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m4x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd, const __bf16 *rs1, vuint16m4_t rs2, size_t vl) { @@ -111,7 +111,7 @@ vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf4x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tumu(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -121,7 +121,7 @@ vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tumu(vbool64_t vm, vbfloat16mf4x2 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf2x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tumu(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -131,7 +131,7 @@ vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tumu(vbool32_t vm, vbfloat16mf2x2 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m1x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tumu(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -141,7 +141,7 @@ vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tumu(vbool16_t vm, vbfloat16m1x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m2x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tumu(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -151,7 +151,7 @@ vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tumu(vbool8_t vm, vbfloat16m2x2_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m4x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tumu(vbool4_t vm, vbfloat16m4x2_t vd, const __bf16 *rs1, vuint16m4_t rs2, size_t vl) { @@ -161,7 +161,7 @@ vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tumu(vbool4_t vm, vbfloat16m4x2_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf4x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // 
vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_mu(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -171,7 +171,7 @@ vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_mu(vbool64_t vm, vbfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf2x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_mu(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -181,7 +181,7 @@ vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_mu(vbool32_t vm, vbfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m1x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -191,7 +191,7 @@ vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m2x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -201,7 +201,7 @@ vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m4x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, const __bf16 *rs1, vuint16m4_t rs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg3ei16.c index 2d93869ab4605..cb3ac512fb35b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg3ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf4x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -21,7 +21,7 @@ vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf2x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -31,7 +31,7 @@ vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei16_v_bf16m1x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -41,7 +41,7 @@ vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m2x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -51,7 +51,7 @@ vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf4x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tum(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -61,7 +61,7 @@ vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tum(vbool64_t vm, vbfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf2x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tum(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -71,7 +71,7 @@ vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tum(vbool32_t vm, vbfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m1x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tum(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -81,7 +81,7 @@ vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tum(vbool16_t vm, vbfloat16m1x3_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m2x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -91,7 +91,7 @@ vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf4x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tumu(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -101,7 +101,7 @@ vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tumu(vbool64_t vm, vbfloat16mf4x3 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf2x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tumu(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -111,7 +111,7 @@ vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tumu(vbool32_t vm, vbfloat16mf2x3 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m1x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tumu(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -121,7 +121,7 @@ vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tumu(vbool16_t vm, vbfloat16m1x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m2x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tumu(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -131,7 +131,7 @@ vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tumu(vbool8_t vm, vbfloat16m2x3_t v 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf4x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_mu(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -141,7 +141,7 @@ vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_mu(vbool64_t vm, vbfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf2x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_mu(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -151,7 +151,7 @@ vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_mu(vbool32_t vm, vbfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m1x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -161,7 +161,7 @@ vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m2x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg4ei16.c index da0bfe95f1363..275cedf059963 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg4ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf4x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -21,7 +21,7 @@ vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf2x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -31,7 +31,7 @@ vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m1x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -41,7 +41,7 @@ vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m2x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -51,7 +51,7 @@ vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf4x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tum(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -61,7 +61,7 @@ vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tum(vbool64_t vm, vbfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf2x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tum(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -71,7 +71,7 @@ vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tum(vbool32_t vm, vbfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m1x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tum(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -81,7 +81,7 @@ vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tum(vbool16_t vm, vbfloat16m1x4_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m2x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -91,7 +91,7 @@ vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf4x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tumu(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -101,7 +101,7 @@ vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tumu(vbool64_t vm, vbfloat16mf4x4 // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf2x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tumu(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -111,7 +111,7 @@ vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tumu(vbool32_t vm, vbfloat16mf2x4 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m1x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tumu(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -121,7 +121,7 @@ vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tumu(vbool16_t vm, vbfloat16m1x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m2x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tumu(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { @@ -131,7 +131,7 @@ vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tumu(vbool8_t vm, vbfloat16m2x4_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf4x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_mu(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -141,7 +141,7 @@ vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_mu(vbool64_t vm, vbfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf2x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_mu(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -151,7 +151,7 @@ vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_mu(vbool32_t vm, vbfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m1x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -161,7 +161,7 @@ vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m2x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint16m2_t rs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg5ei16.c index e8e40a6c5c784..fd99913b63b4e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg5ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf4x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -21,7 +21,7 @@ vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf2x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -31,7 +31,7 @@ vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16m1x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -41,7 +41,7 @@ vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf4x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tum(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -51,7 +51,7 @@ vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tum(vbool64_t vm, vbfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf2x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tum(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -61,7 +61,7 @@ vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tum(vbool32_t vm, vbfloat16mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16m1x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tum(vbool16_t vm, vbfloat16m1x5_t vd, const 
__bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -71,7 +71,7 @@ vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tum(vbool16_t vm, vbfloat16m1x5_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf4x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tumu(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -81,7 +81,7 @@ vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tumu(vbool64_t vm, vbfloat16mf4x5 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf2x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tumu(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -91,7 +91,7 @@ vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tumu(vbool32_t vm, vbfloat16mf2x5 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16m1x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tumu(vbool16_t vm, vbfloat16m1x5_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -101,7 +101,7 @@ vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tumu(vbool16_t vm, vbfloat16m1x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf4x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], 
target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_mu(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -111,7 +111,7 @@ vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_mu(vbool64_t vm, vbfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf2x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_mu(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -121,7 +121,7 @@ vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_mu(vbool32_t vm, vbfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16m1x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_mu(vbool16_t vm, vbfloat16m1x5_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg6ei16.c index 120a836c0155c..af9b21fa70e8b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg6ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf4x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -21,7 +21,7 @@ vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf2x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -31,7 +31,7 @@ vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16m1x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -41,7 +41,7 @@ vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf4x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tum(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -51,7 +51,7 @@ vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tum(vbool64_t vm, vbfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf2x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tum(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -61,7 +61,7 @@ vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tum(vbool32_t vm, vbfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16m1x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tum(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -71,7 +71,7 @@ vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tum(vbool16_t vm, vbfloat16m1x6_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf4x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // 
vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tumu(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -81,7 +81,7 @@ vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tumu(vbool64_t vm, vbfloat16mf4x6 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf2x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tumu(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -91,7 +91,7 @@ vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tumu(vbool32_t vm, vbfloat16mf2x6 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16m1x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tumu(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -101,7 +101,7 @@ vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tumu(vbool16_t vm, vbfloat16m1x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf4x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_mu(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -111,7 +111,7 @@ vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_mu(vbool64_t vm, vbfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf2x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_mu(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -121,7 +121,7 @@ vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_mu(vbool32_t vm, vbfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16m1x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_mu(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg7ei16.c index 52ea20e98539d..e9916204e6f11 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg7ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf4x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -21,7 +21,7 @@ vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, const __b // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf2x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -31,7 +31,7 @@ vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16m1x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -41,7 +41,7 @@ vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf4x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tum(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -51,7 +51,7 @@ vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tum(vbool64_t vm, vbfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf2x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tum(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -61,7 +61,7 @@ vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tum(vbool32_t vm, vbfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16m1x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tum(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -71,7 +71,7 @@ vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tum(vbool16_t vm, vbfloat16m1x7_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf4x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tumu(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -81,7 +81,7 @@ vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tumu(vbool64_t vm, vbfloat16mf4x7 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf2x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tumu(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -91,7 +91,7 @@ vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tumu(vbool32_t vm, vbfloat16mf2x7 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16m1x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tumu(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -101,7 +101,7 @@ vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tumu(vbool16_t vm, vbfloat16m1x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf4x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_mu(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -111,7 +111,7 @@ vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_mu(vbool64_t vm, vbfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf2x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t 
test_vluxseg7ei16_v_bf16mf2x7_mu(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -121,7 +121,7 @@ vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_mu(vbool32_t vm, vbfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16m1x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_mu(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg8ei16.c index 0e16781cef05d..c3d2c83ddcdca 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/vluxseg8ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf4x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -21,7 +21,7 @@ vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf2x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t 
test_vluxseg8ei16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -31,7 +31,7 @@ vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, const __b // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16m1x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -41,7 +41,7 @@ vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, const __bf16 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf4x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tum(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -51,7 +51,7 @@ vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tum(vbool64_t vm, vbfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf2x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tum(vbool32_t vm, vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -61,7 +61,7 @@ vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tum(vbool32_t vm, vbfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16m1x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 
8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tum(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -71,7 +71,7 @@ vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tum(vbool16_t vm, vbfloat16m1x8_t v // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf4x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tumu(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -81,7 +81,7 @@ vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tumu(vbool64_t vm, vbfloat16mf4x8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf2x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tumu(vbool32_t vm, vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -91,7 +91,7 @@ vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tumu(vbool32_t vm, vbfloat16mf2x8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16m1x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tumu(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { @@ -101,7 +101,7 @@ vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tumu(vbool16_t vm, vbfloat16m1x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf4x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_mu(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint16mf4_t rs2, size_t vl) { @@ -111,7 +111,7 @@ vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_mu(vbool64_t vm, vbfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf2x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_mu(vbool32_t vm, vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint16mf2_t rs2, size_t vl) { @@ -121,7 +121,7 @@ vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_mu(vbool32_t vm, vbfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16m1x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_mu(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint16m1_t rs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei16.c index f7d024c6cb3c5..6680cbd126167 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tu(vfloat32mf2x2_t 
maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tu(vint8m1x2_t 
maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t 
test_vloxseg2ei16_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m4x2_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", 
, 2) @test_vloxseg2ei16_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t 
bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t 
test_vloxseg2ei16_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t ma 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei16_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1650,7 +1650,7 @@ vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1660,7 +1660,7 @@ vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1670,7 +1670,7 @@ vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1680,7 +1680,7 @@ vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1690,7 +1690,7 @@ vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1700,7 +1700,7 @@ vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1710,7 +1710,7 @@ vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1720,7 +1720,7 @@ vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1730,7 +1730,7 @@ vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1740,7 +1740,7 @@ vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -1750,7 +1750,7 @@ vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1760,7 +1760,7 @@ vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1770,7 +1770,7 @@ vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1780,7 +1780,7 @@ vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1790,7 +1790,7 @@ vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1800,7 +1800,7 @@ vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1810,7 +1810,7 @@ vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1820,7 +1820,7 @@ vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1830,7 +1830,7 @@ vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1840,7 +1840,7 @@ vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1850,7 +1850,7 @@ vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1860,7 +1860,7 @@ vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1870,7 +1870,7 @@ vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1880,7 +1880,7 @@ vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1890,7 +1890,7 @@ vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1900,7 +1900,7 @@ vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1910,7 +1910,7 @@ vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1920,7 +1920,7 @@ vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei32.c index b2798c3fb3531..1d1ac40541961 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // 
vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m4x2_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) 
[[TMP0]] // vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m4x2_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 
2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei32_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint32m1x2_t 
test_vloxseg2ei32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1650,7 +1650,7 @@ vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1660,7 +1660,7 @@ vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1670,7 +1670,7 @@ vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -1680,7 +1680,7 @@ vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1690,7 +1690,7 @@ vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1700,7 +1700,7 @@ vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1710,7 +1710,7 @@ vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1720,7 +1720,7 @@ vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1730,7 +1730,7 @@ vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1740,7 +1740,7 @@ vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1750,7 +1750,7 @@ vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1760,7 +1760,7 @@ vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1770,7 +1770,7 @@ vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1780,7 +1780,7 @@ vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1790,7 +1790,7 @@ vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1800,7 +1800,7 @@ vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1810,7 +1810,7 @@ vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1820,7 +1820,7 @@ vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1830,7 +1830,7 @@ vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1840,7 +1840,7 @@ vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei64.c index 081306c85fccc..15708d0c8e23e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tu(vfloat32m4x2_t 
maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t 
vl) { @@ -190,7 +190,7 @@ vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t 
vl) { @@ -290,7 +290,7 @@ vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, 
const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei8.c index 42ff16a18ea34..4c3bdc68ffc07 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf4x2_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", 
, 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei8_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tum(vbool32_t mask, 
vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, 
size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t 
test_vloxseg2ei8_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf2x2_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 
2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1650,7 +1650,7 @@ vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1660,7 +1660,7 @@ vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1670,7 +1670,7 @@ vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1680,7 +1680,7 @@ vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1690,7 +1690,7 @@ vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1700,7 +1700,7 @@ vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1710,7 +1710,7 @@ vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1720,7 +1720,7 @@ vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1730,7 +1730,7 @@ vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1740,7 +1740,7 @@ vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1750,7 +1750,7 @@ vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1760,7 +1760,7 @@ vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1770,7 +1770,7 @@ vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1780,7 +1780,7 @@ vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1790,7 +1790,7 @@ vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1800,7 +1800,7 @@ vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1810,7 +1810,7 @@ vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1820,7 +1820,7 @@ vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1830,7 +1830,7 @@ vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1840,7 +1840,7 @@ vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1850,7 +1850,7 @@ vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1860,7 +1860,7 @@ vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1870,7 +1870,7 @@ vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1880,7 +1880,7 @@ vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1890,7 +1890,7 @@ vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1900,7 +1900,7 @@ vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1910,7 +1910,7 @@ vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1920,7 +1920,7 @@ vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei16.c index 3f00ae95b7fa0..60b9524252a59 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t 
test_vloxseg3ei16_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf8x3_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // 
vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m1x3_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei16_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t 
test_vloxseg3ei16_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t 
maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ 
-460,7 +460,7 @@ vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tum(vbool32_t mask, 
vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei16_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei32.c index 34b29b3c139e1..25fb9d25f32f2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, 
size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, 
size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const 
uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // 
vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t 
maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ 
vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t 
masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei32_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei64.c index 72b0923a36f31..d04a27497f4de 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, 
size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t 
vl) { @@ -200,7 +200,7 @@ vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t 
bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei8.c index 4d105b2c80e9d..c2f03cec985fd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei8_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 
3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei16.c index 8ca3d370ee649..f08193e25ff1d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tu(vfloat32m2x4_t 
maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", 
, 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tu(vint16m1x4_t 
maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, 
size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, 
const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ 
-410,7 +410,7 @@ vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t 
test_vloxseg4ei16_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t ma // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei16_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei32.c index db19aa00d88aa..543e43bc663d6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei32_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tu(vint8mf8x4_t 
maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t 
vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t 
bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei64.c index dc250b869f77e..5bfa5874be335 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t 
test_vloxseg4ei64_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf8x4_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // 
vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m2x4_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // 
vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m2x4_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei8.c index cce3cad35c11c..db3d822600b3e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei8_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 
4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei16.c index eb18c32c292b1..83591763950e2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tu(vint8mf8x5_t 
maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, 
size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // 
vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tum(vbool64_t 
mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, 
size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tum(vbool8_t mask, 
vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf8x5_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei32.c index afd9521607a3b..0712efc7ce218 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t 
test_vloxseg5ei32_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf2x5_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", 
, 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vloxseg5ei32_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei64.c index 0dadc3145353a..a8acbeb8775f2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tu(vuint8mf4x5_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // 
vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t 
maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ 
vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t 
mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vloxseg5ei64_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei8.c index bd2534f42d8c0..2f1501a75fe64 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 
@@ vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ 
-200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t 
test_vloxseg5ei8_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t 
test_vloxseg5ei8_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf2x5_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", 
, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", 
, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t 
test_vloxseg5ei8_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei16.c index 69278176e8cf2..624004742e456 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, 
vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const 
int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t 
test_vloxseg6ei16_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vloxseg6ei16_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei32.c index a1e8d1397bc1a..c439d71f80a70 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t 
test_vloxseg6ei32_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf2x6_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", 
, 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vloxseg6ei32_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei64.c index 08a39a8142fc9..d0def11112272 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tu(vuint8mf4x6_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // 
vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t 
maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ 
vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t 
mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vloxseg6ei64_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei8.c index 7b58ff6c8d37c..7268c800edaae 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 
@@ vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ 
-200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t 
test_vloxseg6ei8_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t 
test_vloxseg6ei8_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf2x6_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", 
, 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", 
, 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t 
test_vloxseg6ei8_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei16.c index 226f2e8062f85..e04a78db3a5b0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, 
vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const 
int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t 
test_vloxseg7ei16_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vloxseg7ei16_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei32.c index 9b0dbd68900e0..2b43eb52b5a1a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t 
test_vloxseg7ei32_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf2x7_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", 
, 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vloxseg7ei32_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei64.c index 0c1a25dad8f2f..7a5be2a59df4e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tu(vuint8mf4x7_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // 
vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t 
maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ 
vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t 
mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vloxseg7ei64_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei8.c index 1441c80b32989..1be6adfd87922 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 
@@ vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ 
-200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t 
test_vloxseg7ei8_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t 
test_vloxseg7ei8_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf2x7_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", 
, 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", 
, 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
//
vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t mas
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf2x7_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
//
vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t mas
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16m1x7_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
//
vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t masked
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32mf2x7_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
//
vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t mas
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32m1x7_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
//
vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t masked
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u64m1x7_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
//
vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei16.c
index 0d2833b247a55..dbc35725d5d40 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei16.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf4x8_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8)
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, 
vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const 
int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t 
test_vloxseg8ei16_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vloxseg8ei16_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei32.c index e4f74b7990427..da2173531ff17 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t 
test_vloxseg8ei32_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf2x8_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", 
, 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vloxseg8ei32_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei64.c index 0c23f082e8eed..e4238cbdbabc5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tu(vuint8mf4x8_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // 
vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t 
maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ 
vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t 
mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vloxseg8ei64_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei8.c index 23efe2b17f424..875b8f7f9e595 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 
@@ vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ 
-200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t 
test_vloxseg8ei8_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t 
test_vloxseg8ei8_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf2x8_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", 
, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", 
, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t 
test_vloxseg8ei8_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e16ff.c index d651f99a0d124..4e0e031462db3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const i // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vlseg2e16ff_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vfloat16mf4x2_t 
test_vlseg2e16ff_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -478,7 +478,7 @@ vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } 
[[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -491,7 +491,7 @@ vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -504,7 +504,7 @@ vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -517,7 +517,7 @@ vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -530,7 +530,7 @@ vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -543,7 +543,7 @@ vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -556,7 +556,7 @@ vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), 
i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -569,7 +569,7 @@ vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -582,7 +582,7 @@ vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -595,7 +595,7 @@ vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -608,7 +608,7 @@ vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -621,7 +621,7 @@ vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -634,7 +634,7 @@ vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], 
i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -647,7 +647,7 @@ vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -660,7 +660,7 @@ vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -673,7 +673,7 @@ vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 
2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -686,7 +686,7 @@ vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -699,7 +699,7 @@ vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -712,7 +712,7 @@ vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -725,7 +725,7 @@ vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -738,7 +738,7 @@ vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -751,7 +751,7 @@ vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -764,7 +764,7 @@ vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -777,7 +777,7 @@ vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e32ff.c index cf826bd71012c..18633b89ba11a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr 
noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", 
, 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -478,7 +478,7 @@ vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -491,7 +491,7 @@ vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -504,7 +504,7 @@ vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -517,7 +517,7 @@ vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr 
noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -530,7 +530,7 @@ vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -543,7 +543,7 @@ vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -556,7 +556,7 @@ vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -569,7 +569,7 @@ vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -582,7 +582,7 @@ vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -595,7 +595,7 @@ vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m1x2_mu // CHECK-RV64-SAME: ( 
[[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -608,7 +608,7 @@ vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -621,7 +621,7 @@ vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e64ff.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e64ff.c index 1d2fec0fe4b9b..7a359601a9a34 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } 
[[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const i // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m1x2_tum // CHECK-RV64-SAME: 
( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vlseg2e64ff_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vfloat64m4x2_t 
test_vlseg2e64ff_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } 
[[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } 
[[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e8ff.c index a3dad7ddaab1d..0b675c61e395c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uin // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint8mf2x2_t 
test_vlseg2e8ff_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -478,7 +478,7 @@ vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -491,7 +491,7 @@ vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -504,7 +504,7 @@ vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -517,7 +517,7 @@ vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -530,7 +530,7 @@ vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -543,7 +543,7 @@ vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -556,7 +556,7 @@ vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -569,7 +569,7 @@ vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -582,7 +582,7 @@ vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -595,7 +595,7 @@ vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -608,7 +608,7 @@ vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -621,7 +621,7 @@ vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e16ff.c
index 4cf5a2a939eaa..1d043779747ab 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e16ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e16ff.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf4x3_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -23,7 +23,7 @@ vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf2x3_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -36,7 +36,7 @@ vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m1x3_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
//
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], 
i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -478,7 +478,7 @@ vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -491,7 +491,7 @@ vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -504,7 +504,7 @@ vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -517,7 +517,7 @@ vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -530,7 +530,7 @@ vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -543,7 +543,7 @@ vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf2x3_mu // CHECK-RV64-SAME: ( 
[[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -556,7 +556,7 @@ vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -569,7 +569,7 @@ vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -582,7 +582,7 @@ vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vlseg3e16ff_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -595,7 +595,7 @@ vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -608,7 +608,7 @@ vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -621,7 +621,7 @@ vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t masked // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e32ff.c index 3a06d36f93035..a0d29dc43e7ab 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint32m1x3_t 
test_vlseg3e32ff_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m2x3_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m2x3_tum // CHECK-RV64-SAME: ( 
[[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vlseg3e32ff_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat32m1x3_t 
test_vlseg3e32ff_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } 
[[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } 
[[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e64ff.c index 399c35418bd79..5c9aecbba2411 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], 
i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 
3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], 
i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e8ff.c index 2881994107e1f..eba0421b8c8e7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -478,7 +478,7 @@ vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -491,7 +491,7 @@ vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -504,7 +504,7 @@ vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -517,7 +517,7 @@ vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e16ff.c 
index 7706fd521ad09..995a772e20924 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ 
vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vlseg4e16ff_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m2x4_tumu // CHECK-RV64-SAME: 
( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vlseg4e16ff_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -478,7 +478,7 @@ vuint16m2x4_t 
test_vlseg4e16ff_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -491,7 +491,7 @@ vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -504,7 +504,7 @@ vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -517,7 +517,7 @@ vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -530,7 +530,7 @@ vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -543,7 +543,7 @@ vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 
1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -556,7 +556,7 @@ vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -569,7 +569,7 @@ vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -582,7 +582,7 @@ vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -595,7 +595,7 @@ vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -608,7 +608,7 @@ vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -621,7 +621,7 @@ vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e32ff.c
index 5fb3210e81dc4..fa3ae9ca72ddc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e32ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e32ff.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32mf2x4_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -23,7 +23,7 @@ vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m1x4_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -36,7 +36,7 @@ vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, con
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m2x4_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 }
@llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 
5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 
1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e64ff.c index af1b4af1f265b..264825df7a733 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m2x4_mu // CHECK-RV64-SAME: ( 
[[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e8ff.c index d77f71b48e994..227a69d64f21b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", 
, 4) @test_vlseg4e8ff_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vlseg4e8ff_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_mu(vbool32_t mask, 
vint8mf4x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vint8m2x4_t 
test_vlseg4e8ff_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -478,7 +478,7 @@ vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -491,7 +491,7 @@ vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], 
align 8 @@ -504,7 +504,7 @@ vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -517,7 +517,7 @@ vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e16ff.c
index 9631b7efabb57..21b4303bc0458 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e16ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e16ff.c
@@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 }
@llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat16mf4x5_t 
test_vlseg5e16ff_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } 
[[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), 
i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], 
i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } 
@llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e32ff.c index 7872d8226c1f6..7932e5ba6ac42 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } 
@llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } 
@llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e64ff.c index 76083f6a7117f..f0cc0246fe817 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vuint64m1x5_t 
test_vlseg5e64ff_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], 
align 8 @@ -88,7 +88,7 @@ vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e8ff.c index 046c34841108f..90db070fe82ab 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 
[[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], 
align 8 @@ -127,7 +127,7 @@ vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 
5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e16ff.c index 94b2772d3338c..8e15288728d56 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 
2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 
[[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e32ff.c index a642ef922db72..7a0274850d5b8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e32ff.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vlseg6e32ff_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t mas // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint32m1x6_t 
test_vlseg6e32ff_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e64ff.c index 2115568670ef4..4cfa72df5c022 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] 
= extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, 
i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e8ff.c index 854f4f2c05181..17155282e1938 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 
6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e16ff.c index 3b465f0a7d68a..13998a359a01f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e16ff.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr 
noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vlseg7e16ff_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_mu(vbool16_t 
mask, vfloat16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ 
vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], 
ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e32ff.c index 28d39419f3195..42ea055a90817 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store 
i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), 
i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), 
i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 
1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e64ff.c index d97e281d51f8f..315b7a1d05d02 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } 
@llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e8ff.c index 6dbe69b968d9a..464ea2a2fc53b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf2x7_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } 
@llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vlseg7e8ff_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedo // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e16ff.c index 20560db1cb77d..7fbfa4c78af39 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 
@@ vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vlseg8e16ff_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tum(vbool16_t 
mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ 
vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 
[[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 
8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", 
, 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 
1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e32ff.c index b970eae6c7715..643440812419b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 
5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } 
@llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 
[[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } 
@llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e64ff.c index 4eef00d4c9cf5..eb0c5910e6d6b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 
8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_u64m1x8_tumu // CHECK-RV64-SAME: ( 
[[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vlseg8e64ff_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e8ff.c index db9c6f9c211ed..e1bce794c6df7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } 
[[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8m1x8_tum // CHECK-RV64-SAME: ( 
[[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vlseg8e8ff_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tumu(vbool8_t mask, 
vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ 
vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei16.c index 2f02aaabffd43..0578d07a88829 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] 
// vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m1x2_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 
2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei16_v_i8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tu(vint32m2x2_t 
maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t 
vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, 
const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei16_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m4x2_tum // CHECK-RV64-SAME: ( 
[[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1650,7 +1650,7 @@ vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1660,7 +1660,7 @@ vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1670,7 +1670,7 @@ vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1680,7 +1680,7 @@ vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1690,7 +1690,7 @@ vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1700,7 +1700,7 @@ vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1710,7 +1710,7 @@ vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1720,7 +1720,7 @@ vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1730,7 +1730,7 @@ vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1740,7 +1740,7 @@ vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -1750,7 +1750,7 @@ vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1760,7 +1760,7 @@ vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1770,7 +1770,7 @@ vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1780,7 +1780,7 @@ vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1790,7 +1790,7 @@ vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1800,7 +1800,7 @@ vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1810,7 +1810,7 @@ vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1820,7 +1820,7 @@ vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1830,7 +1830,7 @@ vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1840,7 +1840,7 @@ vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1850,7 +1850,7 @@ vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1860,7 +1860,7 @@ vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1870,7 +1870,7 @@ vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1880,7 +1880,7 @@ vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1890,7 +1890,7 @@ vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1900,7 +1900,7 @@ vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1910,7 +1910,7 @@ vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1920,7 +1920,7 @@ vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei32.c index 43b6856feb36a..f0e759c4e82b2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // 
vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m4x2_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) 
[[TMP0]] // vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m4x2_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 
2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei32_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint32m1x2_t 
test_vluxseg2ei32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1650,7 +1650,7 @@ vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1660,7 +1660,7 @@ vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1670,7 +1670,7 @@ vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -1680,7 +1680,7 @@ vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1690,7 +1690,7 @@ vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1700,7 +1700,7 @@ vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1710,7 +1710,7 @@ vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1720,7 +1720,7 @@ vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1730,7 +1730,7 @@ vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1740,7 +1740,7 @@ vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1750,7 +1750,7 @@ vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1760,7 +1760,7 @@ vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1770,7 +1770,7 @@ vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1780,7 +1780,7 @@ vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1790,7 +1790,7 @@ vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1800,7 +1800,7 @@ vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1810,7 +1810,7 @@ vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1820,7 +1820,7 @@ vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1830,7 +1830,7 @@ vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1840,7 +1840,7 @@ vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei64.c index e6a92ca455f63..76a315b939196 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tu(vfloat32m4x2_t 
maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t 
vl) { @@ -190,7 +190,7 @@ vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t 
vl) { @@ -290,7 +290,7 @@ vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, 
const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei8.c index dd1a26e676652..73729d3f24dac 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf4x2_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", 
, 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei8_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tum(vbool32_t mask, 
vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, 
size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t 
test_vluxseg2ei8_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf2x2_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 
2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1650,7 +1650,7 @@ vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1660,7 +1660,7 @@ vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1670,7 +1670,7 @@ vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1680,7 +1680,7 @@ vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1690,7 +1690,7 @@ vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1700,7 +1700,7 @@ vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1710,7 +1710,7 @@ vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1720,7 +1720,7 @@ vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1730,7 +1730,7 @@ vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1740,7 +1740,7 @@ vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1750,7 +1750,7 @@ vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1760,7 +1760,7 @@ vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1770,7 +1770,7 @@ vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1780,7 +1780,7 @@ vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1790,7 +1790,7 @@ vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1800,7 +1800,7 @@ vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1810,7 +1810,7 @@ vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1820,7 +1820,7 @@ vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1830,7 +1830,7 @@ vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1840,7 +1840,7 @@ vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1850,7 +1850,7 @@ vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1860,7 +1860,7 @@ vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1870,7 +1870,7 @@ vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1880,7 +1880,7 @@ vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1890,7 +1890,7 @@ vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1900,7 +1900,7 @@ vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1910,7 +1910,7 @@ vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1920,7 +1920,7 @@ vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei16.c index dcca2773cf6a8..49426d4000aec 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t 
test_vluxseg3ei16_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf8x3_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // 
vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m1x3_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei16_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t 
test_vluxseg3ei16_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t 
maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ 
-460,7 +460,7 @@ vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tum(vbool32_t mask, 
vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei16_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei32.c index 50144a47f0712..c860976a327f5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, 
size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, 
size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const 
uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // 
vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t 
maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ 
vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t 
masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei32_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei64.c index be427b31c3707..e3ebbfbb5ae90 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, 
size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t 
vl) { @@ -200,7 +200,7 @@ vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t 
bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei8.c index adf0a87100388..c10e39bc7fa85 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei8_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 
3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei16.c index 148649cb64697..7b5ee95f459c2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tu(vfloat32m2x4_t 
maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", 
, 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tu(vint16m1x4_t 
maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, 
size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, 
const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ 
-410,7 +410,7 @@ vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t 
test_vluxseg4ei16_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t ma // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei16_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei32.c index 24087a19fba50..fa13477cbae8e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei32_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tu(vint8mf8x4_t 
maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t 
vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t 
bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei64.c index 2ec9dc591aff2..12ba944cde212 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t 
test_vluxseg4ei64_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf8x4_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // 
vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m2x4_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // 
vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m2x4_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei8.c index df5720ce1c427..f5d0c3eb8edb3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei8_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 
4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei16.c index 8bdf6196da7ff..f2ae4592ada79 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tu(vint8mf8x5_t 
maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, 
size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // 
vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tum(vbool64_t 
mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, 
size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tum(vbool8_t mask, 
vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf8x5_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei32.c index 3d7f11ee9903f..cda24727bdd89 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t 
test_vluxseg5ei32_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf2x5_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", 
, 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vluxseg5ei32_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei64.c index dcb417756b022..cb9216a7b3086 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tu(vuint8mf4x5_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // 
vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t 
maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ 
vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t 
mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vluxseg5ei64_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei8.c index e85c8a07b8b6f..07fc7b954f4d0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 
@@ vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ 
-200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t 
test_vluxseg5ei8_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t 
test_vluxseg5ei8_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf2x5_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", 
, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", 
, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t 
test_vluxseg5ei8_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei16.c index 7d547782832bc..02ed65e7435c2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, 
vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const 
int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t 
test_vluxseg6ei16_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vluxseg6ei16_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei32.c index ade2f409d50f9..2a4901e7d1f2e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t 
test_vluxseg6ei32_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf2x6_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", 
, 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vluxseg6ei32_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei64.c index f7f5b3a6d158e..09c15b9ca49c1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tu(vuint8mf4x6_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // 
vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t 
maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ 
vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t 
mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vluxseg6ei64_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei8.c index 33e962badb5d1..a43bc110aa5fb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 
@@ vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ 
-200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t 
test_vluxseg6ei8_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t 
test_vluxseg6ei8_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf2x6_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", 
, 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", 
, 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t 
test_vluxseg6ei8_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei16.c index 8391a03136229..542c0616d7d8b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, 
vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const 
int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t 
test_vluxseg7ei16_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vluxseg7ei16_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei32.c index b2acbddd2e66e..a2dc98132d87d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t 
test_vluxseg7ei32_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf2x7_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", 
, 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vluxseg7ei32_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei64.c index aaf383a087458..962f059ba08ac 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tu(vuint8mf4x7_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // 
vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t 
maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ 
vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t 
mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vluxseg7ei64_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei8.c index 563c4f38e7ae3..655a310793dff 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 
@@ vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ 
-200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t 
test_vluxseg7ei8_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t 
test_vluxseg7ei8_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf2x7_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", 
, 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", 
, 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t 
test_vluxseg7ei8_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei16.c index e7cfac51fdfcc..d5f9039a328e0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, 
vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const 
int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t 
test_vluxseg8ei16_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vluxseg8ei16_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]]
 //
 vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maske
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u64m1x8_mu
 // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]]
 //
 vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei32.c
index 0e7e0ab7f3d89..0804c874a0ac4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei32.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf4x8_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
 // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]]
 //
 vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple,
 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf2x8_tu
 // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]],
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t 
test_vluxseg8ei32_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf2x8_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", 
, 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vluxseg8ei32_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei64.c index ec78fa6068329..fc5103356d289 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tu(vuint8mf4x8_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // 
vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t 
maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ 
vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t 
mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vluxseg8ei64_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]]
//
vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei8.c
index 7521260ff8d85..46b0f3cf3771e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei8.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf4x8_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]]
//
vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf2x8_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]]
//
vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
@@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16m1x8_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 
@@ vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ 
-200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t 
test_vluxseg8ei8_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t 
test_vluxseg8ei8_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf2x8_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", 
, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", 
, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t 
test_vluxseg8ei8_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg2ei16.c index b39a55d7720ea..5815a76a43ceb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg2ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf4x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, @@ -23,7 +23,7 @@ vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf2x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, @@ -35,7 +35,7 @@ vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m1x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, @@ -47,7 +47,7 @@ vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m2x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, @@ -59,7 +59,7 @@ vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m4x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, @@ -71,7 +71,7 @@ vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf4x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tum(vbool64_t vm, @@ -85,7 +85,7 @@ vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf2x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tum(vbool32_t vm, @@ -99,7 +99,7 @@ vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m1x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tum(vbool16_t vm, @@ -112,7 +112,7 @@ vbfloat16m1x2_t 
test_vloxseg2ei16_v_bf16m1x2_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m2x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tum(vbool8_t vm, @@ -125,7 +125,7 @@ vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tum(vbool8_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m4x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tum(vbool4_t vm, @@ -138,7 +138,7 @@ vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tum(vbool4_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf4x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tumu(vbool64_t vm, @@ -152,7 +152,7 @@ vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf2x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], 
[[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tumu(vbool32_t vm, @@ -166,7 +166,7 @@ vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m1x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tumu(vbool16_t vm, @@ -179,7 +179,7 @@ vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m2x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tumu(vbool8_t vm, @@ -192,7 +192,7 @@ vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tumu(vbool8_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m4x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tumu(vbool4_t vm, @@ -205,7 +205,7 @@ vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tumu(vbool4_t vm, // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf4x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_mu(vbool64_t vm, @@ -218,7 +218,7 @@ vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16mf2x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_mu(vbool32_t vm, @@ -231,7 +231,7 @@ vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m1x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_mu(vbool16_t vm, @@ -244,7 +244,7 @@ vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_mu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m2x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, @@ -256,7 +256,7 @@ vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_bf16m4x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg3ei16.c index f6a46e633fd95..711167c67ecce 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg3ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf4x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, @@ -23,7 +23,7 @@ vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf2x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, @@ -35,7 +35,7 @@ vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m1x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, @@ -47,7 +47,7 @@ vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m2x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, @@ -59,7 +59,7 @@ vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf4x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tum(vbool64_t vm, @@ -73,7 +73,7 @@ vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf2x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tum(vbool32_t vm, @@ -87,7 +87,7 @@ vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m1x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tum(vbool16_t vm, @@ -100,7 +100,7 @@ vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m2x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tum(vbool8_t vm, @@ -113,7 +113,7 @@ vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tum(vbool8_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf4x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tumu(vbool64_t 
vm, @@ -127,7 +127,7 @@ vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf2x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tumu(vbool32_t vm, @@ -141,7 +141,7 @@ vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m1x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tumu(vbool16_t vm, @@ -154,7 +154,7 @@ vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m2x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tumu(vbool8_t vm, @@ -167,7 +167,7 @@ vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tumu(vbool8_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf4x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_mu(vbool64_t vm, @@ -180,7 +180,7 @@ vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16mf2x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_mu(vbool32_t vm, @@ -193,7 +193,7 @@ vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m1x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_mu(vbool16_t vm, @@ -206,7 +206,7 @@ vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_mu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_bf16m2x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_mu(vbool8_t vm, 
vbfloat16m2x3_t vd, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg4ei16.c index fa40e5c116ca8..45af79a960e91 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg4ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf4x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, @@ -23,7 +23,7 @@ vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf2x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, @@ -35,7 +35,7 @@ vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m1x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, @@ -47,7 +47,7 @@ vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m2x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, @@ -59,7 +59,7 @@ vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf4x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tum(vbool64_t vm, @@ -73,7 +73,7 @@ vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf2x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tum(vbool32_t vm, @@ -87,7 +87,7 @@ vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m1x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tum(vbool16_t vm, @@ -100,7 +100,7 @@ vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m2x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tum(vbool8_t vm, @@ -113,7 +113,7 @@ vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tum(vbool8_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf4x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tumu(vbool64_t vm, @@ -127,7 +127,7 @@ vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf2x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tumu(vbool32_t vm, @@ -141,7 +141,7 @@ vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m1x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tumu(vbool16_t vm, @@ -154,7 +154,7 @@ vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m2x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tumu(vbool8_t vm, @@ -167,7 +167,7 @@ vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tumu(vbool8_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf4x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_mu(vbool64_t vm, @@ -180,7 +180,7 @@ vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16mf2x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t 
test_vloxseg4ei16_v_bf16mf2x4_mu(vbool32_t vm, @@ -193,7 +193,7 @@ vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m1x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_mu(vbool16_t vm, @@ -206,7 +206,7 @@ vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_mu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_bf16m2x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg5ei16.c index c0dee3d304aa7..a7ea11363d5de 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg5ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf4x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, @@ -23,7 +23,7 @@ vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", 
, 5) @test_vloxseg5ei16_v_bf16mf2x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, @@ -35,7 +35,7 @@ vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16m1x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, @@ -47,7 +47,7 @@ vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf4x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tum(vbool64_t vm, @@ -61,7 +61,7 @@ vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf2x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr 
[[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tum(vbool32_t vm, @@ -75,7 +75,7 @@ vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16m1x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tum(vbool16_t vm, @@ -88,7 +88,7 @@ vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf4x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tumu(vbool64_t vm, @@ -102,7 +102,7 @@ vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf2x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tumu(vbool32_t vm, @@ -116,7 +116,7 @@ vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16m1x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tumu(vbool16_t vm, @@ -129,7 +129,7 @@ vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf4x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_mu(vbool64_t vm, @@ -142,7 +142,7 @@ vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16mf2x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_mu(vbool32_t vm, @@ -155,7 +155,7 @@ vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_bf16m1x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_mu(vbool16_t vm, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg6ei16.c index abc424729a2d4..21130591c2db8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg6ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf4x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, @@ -23,7 +23,7 @@ vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf2x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, @@ -35,7 +35,7 @@ vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16m1x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, @@ -47,7 +47,7 @@ vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf4x6_tum( // CHECK-RV64-SAME: 
[[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tum(vbool64_t vm, @@ -61,7 +61,7 @@ vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf2x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tum(vbool32_t vm, @@ -75,7 +75,7 @@ vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16m1x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tum(vbool16_t vm, @@ -88,7 +88,7 @@ vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf4x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tumu(vbool64_t vm, @@ -102,7 +102,7 @@ vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf2x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tumu(vbool32_t vm, @@ -116,7 +116,7 @@ vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16m1x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tumu(vbool16_t vm, @@ -129,7 +129,7 @@ vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf4x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_mu(vbool64_t vm, @@ -142,7 +142,7 @@ vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16mf2x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], 
target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_mu(vbool32_t vm, @@ -155,7 +155,7 @@ vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_bf16m1x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_mu(vbool16_t vm, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg7ei16.c index faa2e2266ac91..e48c8738f1e0a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg7ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf4x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, @@ -23,7 +23,7 @@ vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf2x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, @@ -35,7 +35,7 @@ vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16m1x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, @@ -47,7 +47,7 @@ vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf4x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tum(vbool64_t vm, @@ -61,7 +61,7 @@ vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf2x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tum(vbool32_t vm, @@ -75,7 +75,7 @@ vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tum(vbool32_t vm, // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16m1x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tum(vbool16_t vm, @@ -88,7 +88,7 @@ vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf4x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tumu(vbool64_t vm, @@ -102,7 +102,7 @@ vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf2x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tumu(vbool32_t vm, @@ -116,7 +116,7 @@ vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16m1x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tumu(vbool16_t vm, @@ -129,7 +129,7 @@ vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf4x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_mu(vbool64_t vm, @@ -142,7 +142,7 @@ vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16mf2x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_mu(vbool32_t vm, @@ -155,7 +155,7 @@ vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_bf16m1x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_mu(vbool16_t vm, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg8ei16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg8ei16.c index 1cbbb6b56d6be..3fdf404383311 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vloxseg8ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf4x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, @@ -23,7 +23,7 @@ vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf2x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, @@ -35,7 +35,7 @@ vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16m1x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, @@ -47,7 +47,7 @@ vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf4x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tum(vbool64_t vm, @@ -61,7 +61,7 @@ vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf2x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tum(vbool32_t vm, @@ -75,7 +75,7 @@ vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16m1x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tum(vbool16_t vm, @@ -88,7 +88,7 @@ vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf4x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t 
test_vloxseg8ei16_v_bf16mf4x8_tumu(vbool64_t vm, @@ -102,7 +102,7 @@ vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf2x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tumu(vbool32_t vm, @@ -116,7 +116,7 @@ vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16m1x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tumu(vbool16_t vm, @@ -129,7 +129,7 @@ vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf4x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_mu(vbool64_t vm, @@ -142,7 +142,7 @@ vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16mf2x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_mu(vbool32_t vm, @@ -155,7 +155,7 @@ vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_bf16m1x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_mu(vbool16_t vm, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg2e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg2e16ff.c index f3889f115a8e6..c6f72916ac8e4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg2e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg2e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf4x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -26,7 +26,7 @@ vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf2x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -41,7 +41,7 @@ vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m1x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -56,7 +56,7 @@ vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_tu(vbfloat16m1x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m2x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -71,7 +71,7 @@ vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_tu(vbfloat16m2x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m4x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 
4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -86,7 +86,7 @@ vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_tu(vbfloat16m4x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf4x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -102,7 +102,7 @@ vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf2x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -118,7 +118,7 @@ vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m1x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -134,7 +134,7 @@ vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m2x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -149,7 +149,7 @@ vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m4x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -164,7 +164,7 @@ vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf4x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], 
ptr [[NEW_VL]], align 8 @@ -180,7 +180,7 @@ vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf2x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -196,7 +196,7 @@ vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m1x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -212,7 +212,7 @@ vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m2x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -228,7 +228,7 @@ vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_tumu(vbool8_t vm, // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m4x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_tumu(vbool4_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf4x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -260,7 +260,7 @@ vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16mf2x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -276,7 +276,7 @@ vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m1x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) 
[[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -291,7 +291,7 @@ vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m2x2_mu(
// CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -306,7 +306,7 @@ vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_bf16m4x2_mu(
// CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg3e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg3e16ff.c
index c1fc7f13d64cb..a332d292f7900 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg3e16ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg3e16ff.c
@@ -11,7 +11,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf4x3_tu(
// CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -26,7 +26,7 @@ vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf2x3_tu(
// CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -41,7 +41,7 @@ vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m1x3_tu(
// CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -56,7 +56,7 @@ vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_tu(vbfloat16m1x3_t vd,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m2x3_tu(
// CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -71,7 +71,7 @@ vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_tu(vbfloat16m2x3_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf4x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -87,7 +87,7 @@ vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf2x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -103,7 +103,7 @@ vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m1x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -119,7 +119,7 @@ vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m2x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -134,7 +134,7 @@ vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf4x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -150,7 +150,7 @@ vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf2x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], 
[[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m1x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -182,7 +182,7 @@ vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m2x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -198,7 +198,7 @@ vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_tumu(vbool8_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf4x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -214,7 +214,7 @@ vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16mf2x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -230,7 +230,7 @@ vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m1x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -245,7 +245,7 @@ vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_bf16m2x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[VM]], i64 
[[VL]], i64 1, i64 4)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg4e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg4e16ff.c
index fae68f7c9f360..004c75f9db6db 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg4e16ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg4e16ff.c
@@ -11,7 +11,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf4x4_tu(
// CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -26,7 +26,7 @@ vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf2x4_tu(
// CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -41,7 +41,7 @@ vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m1x4_tu(
// CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 }
@llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -56,7 +56,7 @@ vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_tu(vbfloat16m1x4_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m2x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -71,7 +71,7 @@ vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_tu(vbfloat16m2x4_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf4x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -87,7 +87,7 @@ vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf2x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 
4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -103,7 +103,7 @@ vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m1x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -119,7 +119,7 @@ vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m2x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -134,7 +134,7 @@ vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf4x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -150,7 +150,7 @@ vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf2x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m1x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -182,7 +182,7 @@ vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m2x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -198,7 +198,7 @@ vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_tumu(vbool8_t vm, 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf4x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -214,7 +214,7 @@ vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16mf2x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -230,7 +230,7 @@ vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m1x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -245,7 +245,7 @@ vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_bf16m2x4_mu( // CHECK-RV64-SAME: 
[[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg5e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg5e16ff.c
index 3d98f55c39e21..764d77ede3689 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg5e16ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg5e16ff.c
@@ -11,7 +11,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf4x5_tu(
// CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -26,7 +26,7 @@ vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf2x5_tu(
// CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -41,7 +41,7 @@ vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd,
//
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16m1x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -56,7 +56,7 @@ vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_tu(vbfloat16m1x5_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf4x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -72,7 +72,7 @@ vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf2x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16m1x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -104,7 +104,7 @@ vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf4x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -120,7 +120,7 @@ vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf2x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -136,7 +136,7 @@ vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16m1x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -152,7 +152,7 @@ vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf4x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -168,7 +168,7 @@ vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16mf2x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -184,7 +184,7 @@ vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_bf16m1x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[VD]], ptr 
[[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg6e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg6e16ff.c
index 75ace1b4806e0..3e62a07c57937 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg6e16ff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg6e16ff.c
@@ -11,7 +11,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf4x6_tu(
// CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -26,7 +26,7 @@ vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf2x6_tu(
// CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -41,7 +41,7 @@ vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16m1x6_tu(
// CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 }
@llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -56,7 +56,7 @@ vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_tu(vbfloat16m1x6_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf4x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -72,7 +72,7 @@ vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf2x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16m1x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -104,7 +104,7 @@ vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf4x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -120,7 +120,7 @@ vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf2x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -136,7 +136,7 @@ vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16m1x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) 
[[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -152,7 +152,7 @@ vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf4x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -168,7 +168,7 @@ vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16mf2x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -184,7 +184,7 @@ vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_bf16m1x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } 
[[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg7e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg7e16ff.c index ee59df4dd9b17..931e93c18b257 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg7e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg7e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf4x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -26,7 +26,7 @@ vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf2x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -41,7 +41,7 @@ vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16m1x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -56,7 +56,7 @@ vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_tu(vbfloat16m1x7_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf4x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -72,7 +72,7 @@ vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf2x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16m1x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 
7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -104,7 +104,7 @@ vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf4x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -120,7 +120,7 @@ vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf2x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -136,7 +136,7 @@ vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16m1x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -152,7 +152,7 @@ vbfloat16m1x7_t 
test_vlseg7e16ff_v_bf16m1x7_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf4x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -168,7 +168,7 @@ vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16mf2x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -184,7 +184,7 @@ vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_bf16m1x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg8e16ff.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg8e16ff.c index 3ccc427f64163..214253e9b619a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg8e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vlseg8e16ff.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf4x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -26,7 +26,7 @@ vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf2x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -41,7 +41,7 @@ vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16m1x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -56,7 +56,7 @@ vbfloat16m1x8_t 
test_vlseg8e16ff_v_bf16m1x8_tu(vbfloat16m1x8_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf4x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -72,7 +72,7 @@ vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf2x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16m1x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -104,7 +104,7 @@ vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vlseg8e16ff_v_bf16mf4x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -120,7 +120,7 @@ vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf2x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -136,7 +136,7 @@ vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16m1x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -152,7 +152,7 @@ vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf4x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr 
noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -168,7 +168,7 @@ vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16mf2x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -184,7 +184,7 @@ vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_bf16m1x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg2ei16.c index 9c510fa3e7a27..5b589b410311b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg2ei16.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg2ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf4x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, @@ -23,7 +23,7 @@ vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf2x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, @@ -35,7 +35,7 @@ vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m1x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, @@ -47,7 +47,7 @@ vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m2x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, @@ -59,7 +59,7 @@ vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m4x2_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, @@ -71,7 +71,7 @@ vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf4x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tum(vbool64_t vm, @@ -85,7 +85,7 @@ vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf2x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tum(vbool32_t vm, @@ -99,7 +99,7 @@ vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m1x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tum(vbool16_t vm, @@ -112,7 +112,7 @@ vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m2x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tum(vbool8_t vm, @@ -125,7 +125,7 @@ vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tum(vbool8_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m4x2_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tum(vbool4_t vm, @@ -138,7 +138,7 @@ vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tum(vbool4_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf4x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tumu(vbool64_t vm, @@ -152,7 +152,7 @@ vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf2x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tumu(vbool32_t vm, @@ -166,7 +166,7 @@ vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m1x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tumu(vbool16_t vm, @@ -179,7 +179,7 @@ vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m2x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tumu(vbool8_t vm, @@ -192,7 +192,7 @@ vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tumu(vbool8_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m4x2_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tumu(vbool4_t vm, @@ -205,7 +205,7 @@ vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tumu(vbool4_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf4x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_mu(vbool64_t vm, @@ -218,7 +218,7 @@ vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16mf2x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_mu(vbool32_t vm, @@ -231,7 +231,7 @@ vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m1x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m1x2_t 
test_vluxseg2ei16_v_bf16m1x2_mu(vbool16_t vm, @@ -244,7 +244,7 @@ vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_mu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m2x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, @@ -256,7 +256,7 @@ vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_bf16m4x2_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg3ei16.c index 90652e820bcf6..6d2583ed02a8e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg3ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf4x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, @@ -23,7 +23,7 @@ vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf2x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, @@ -35,7 +35,7 @@ vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m1x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, @@ -47,7 +47,7 @@ vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m2x3_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, @@ -59,7 +59,7 @@ vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf4x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], 
[[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tum(vbool64_t vm, @@ -73,7 +73,7 @@ vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf2x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tum(vbool32_t vm, @@ -87,7 +87,7 @@ vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m1x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tum(vbool16_t vm, @@ -100,7 +100,7 @@ vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m2x3_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tum(vbool8_t vm, @@ -113,7 +113,7 @@ vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tum(vbool8_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf4x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tumu(vbool64_t vm, @@ -127,7 +127,7 @@ vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf2x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tumu(vbool32_t vm, @@ -141,7 +141,7 @@ vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m1x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tumu(vbool16_t vm, @@ -154,7 +154,7 @@ vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m2x3_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tumu(vbool8_t vm, @@ -167,7 +167,7 @@ vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tumu(vbool8_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf4x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_mu(vbool64_t vm, @@ -180,7 +180,7 @@ vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16mf2x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_mu(vbool32_t vm, @@ -193,7 +193,7 @@ vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m1x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_mu(vbool16_t vm, @@ -206,7 +206,7 @@ vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_mu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_bf16m2x3_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg4ei16.c index df22e26960a3e..886d35f4f70a3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg4ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf4x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, @@ -23,7 +23,7 @@ vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf2x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, @@ -35,7 +35,7 @@ vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m1x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, @@ -47,7 +47,7 @@ vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m2x4_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, @@ -59,7 +59,7 @@ vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf4x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tum(vbool64_t vm, @@ -73,7 +73,7 @@ vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf2x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tum(vbool32_t vm, @@ -87,7 +87,7 @@ vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m1x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tum(vbool16_t vm, @@ -100,7 +100,7 @@ vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m2x4_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tum(vbool8_t vm, @@ -113,7 +113,7 @@ vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tum(vbool8_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf4x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tumu(vbool64_t vm, @@ -127,7 +127,7 @@ vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf2x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) 
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tumu(vbool32_t vm, @@ -141,7 +141,7 @@ vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m1x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tumu(vbool16_t vm, @@ -154,7 +154,7 @@ vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m2x4_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tumu(vbool8_t vm, @@ -167,7 +167,7 @@ vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tumu(vbool8_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf4x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_mu(vbool64_t vm, @@ -180,7 +180,7 @@ vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16mf2x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_mu(vbool32_t vm, @@ -193,7 +193,7 @@ vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m1x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_mu(vbool16_t vm, @@ -206,7 +206,7 @@ vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_mu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_bf16m2x4_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg5ei16.c index cacd851f5e4e2..4223e926e0704 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg5ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf4x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], 
[[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, @@ -23,7 +23,7 @@ vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf2x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, @@ -35,7 +35,7 @@ vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16m1x5_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, @@ -47,7 +47,7 @@ vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf4x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tum(vbool64_t vm, @@ -61,7 +61,7 @@ vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf2x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], 
ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tum(vbool32_t vm, @@ -75,7 +75,7 @@ vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16m1x5_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tum(vbool16_t vm, @@ -88,7 +88,7 @@ vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf4x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tumu(vbool64_t vm, @@ -102,7 +102,7 @@ vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf2x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr 
[[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tumu(vbool32_t vm, @@ -116,7 +116,7 @@ vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16m1x5_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tumu(vbool16_t vm, @@ -129,7 +129,7 @@ vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf4x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_mu(vbool64_t vm, @@ -142,7 +142,7 @@ vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16mf2x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_mu(vbool32_t vm, @@ -155,7 +155,7 @@ vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_bf16m1x5_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_mu(vbool16_t vm, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg6ei16.c index 4754e8bb954b9..7eb149ea1bb10 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg6ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf4x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, @@ -23,7 +23,7 @@ vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf2x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, @@ -35,7 +35,7 @@ vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16m1x6_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, @@ -47,7 +47,7 @@ vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf4x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tum(vbool64_t vm, @@ -61,7 +61,7 @@ vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf2x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tum(vbool32_t vm, @@ -75,7 +75,7 @@ vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16m1x6_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tum(vbool16_t vm, @@ -88,7 +88,7 @@ vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf4x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], 
target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tumu(vbool64_t vm, @@ -102,7 +102,7 @@ vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf2x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tumu(vbool32_t vm, @@ -116,7 +116,7 @@ vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16m1x6_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tumu(vbool16_t vm, @@ -129,7 +129,7 @@ vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf4x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_mu(vbool64_t vm, @@ -142,7 +142,7 @@ vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16mf2x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_mu(vbool32_t vm, @@ -155,7 +155,7 @@ vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_bf16m1x6_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_mu(vbool16_t vm, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg7ei16.c index 80fda3c73155f..4ae7f446f511f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg7ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf4x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", 
, 7) [[TMP0]] // vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, @@ -23,7 +23,7 @@ vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf2x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, @@ -35,7 +35,7 @@ vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16m1x7_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, @@ -47,7 +47,7 @@ vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf4x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tum(vbool64_t vm, @@ -61,7 +61,7 @@ vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf2x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr 
[[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tum(vbool32_t vm, @@ -75,7 +75,7 @@ vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16m1x7_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tum(vbool16_t vm, @@ -88,7 +88,7 @@ vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf4x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tumu(vbool64_t vm, @@ -102,7 +102,7 @@ vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf2x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tumu(vbool32_t vm, @@ -116,7 +116,7 @@ vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tumu(vbool32_t vm, // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16m1x7_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tumu(vbool16_t vm, @@ -129,7 +129,7 @@ vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf4x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_mu(vbool64_t vm, @@ -142,7 +142,7 @@ vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16mf2x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_mu(vbool32_t vm, @@ -155,7 +155,7 @@ vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_bf16m1x7_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_mu(vbool16_t vm, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg8ei16.c index 930ab0a28b93a..9a8c076d6cc60 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/vluxseg8ei16.c @@ -11,7 +11,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf4x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, @@ -23,7 +23,7 @@ vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf2x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, @@ -35,7 +35,7 @@ vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16m1x8_tu( // CHECK-RV64-SAME: target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t 
test_vluxseg8ei16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, @@ -47,7 +47,7 @@ vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf4x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tum(vbool64_t vm, @@ -61,7 +61,7 @@ vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tum(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf2x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tum(vbool32_t vm, @@ -75,7 +75,7 @@ vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tum(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16m1x8_tum( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tum(vbool16_t vm, @@ -88,7 +88,7 @@ vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tum(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf4x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tumu(vbool64_t vm, @@ -102,7 +102,7 @@ vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tumu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf2x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tumu(vbool32_t vm, @@ -116,7 +116,7 @@ vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tumu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16m1x8_tumu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tumu(vbool16_t vm, @@ -129,7 +129,7 @@ vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tumu(vbool16_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf4x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf4x8_t 
test_vluxseg8ei16_v_bf16mf4x8_mu(vbool64_t vm, @@ -142,7 +142,7 @@ vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_mu(vbool64_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16mf2x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_mu(vbool32_t vm, @@ -155,7 +155,7 @@ vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_mu(vbool32_t vm, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_bf16m1x8_mu( // CHECK-RV64-SAME: [[VM:%.*]], target("riscv.vector.tuple", , 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[VD]], ptr [[RS1]], [[RS2]], [[VM]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_mu(vbool16_t vm, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei16.c index 6aa43b1375cba..cd47648203e91 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tu(vfloat16mf4x2_t 
maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t 
bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const 
int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tu(vuint8mf8x2_t 
maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t 
test_vloxseg2ei16_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m4x2_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1650,7 +1650,7 @@ vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1660,7 +1660,7 @@ vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1670,7 +1670,7 @@ vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1680,7 +1680,7 @@ vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1690,7 +1690,7 @@ vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1700,7 +1700,7 @@ vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1710,7 +1710,7 @@ vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1720,7 +1720,7 @@ vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1730,7 +1730,7 @@ vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1740,7 +1740,7 @@ vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -1750,7 +1750,7 @@ vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1760,7 +1760,7 @@ vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1770,7 +1770,7 @@ vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1780,7 +1780,7 @@ vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1790,7 +1790,7 @@ vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1800,7 +1800,7 @@ vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1810,7 +1810,7 @@ vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1820,7 +1820,7 @@ vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1830,7 +1830,7 @@ vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1840,7 +1840,7 @@ vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1850,7 +1850,7 @@ vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1860,7 +1860,7 @@ vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1870,7 +1870,7 @@ vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1880,7 +1880,7 @@ vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1890,7 +1890,7 @@ vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1900,7 +1900,7 @@ vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1910,7 +1910,7 @@ vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1920,7 +1920,7 @@ vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei16_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei32.c index c414acc739f65..61e2694d2b9ec 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t 
test_vloxseg2ei32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m1x2_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) 
[[TMP0]] // vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m1x2_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei32_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m4x2_t 
test_vloxseg2ei32_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t 
test_vloxseg2ei32_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const 
int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf4x2_t 
test_vloxseg2ei32_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedo // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei32_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1650,7 +1650,7 @@ vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1660,7 +1660,7 @@ vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1670,7 +1670,7 @@ vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -1680,7 +1680,7 @@ vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1690,7 +1690,7 @@ vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1700,7 +1700,7 @@ vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1710,7 +1710,7 @@ vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1720,7 +1720,7 @@ vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1730,7 +1730,7 @@ vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1740,7 +1740,7 @@ vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1750,7 +1750,7 @@ vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1760,7 +1760,7 @@ vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1770,7 +1770,7 @@ vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1780,7 +1780,7 @@ vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1790,7 +1790,7 @@ vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1800,7 +1800,7 @@ vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1810,7 +1810,7 @@ vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1820,7 +1820,7 @@ vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1830,7 +1830,7 @@ vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1840,7 +1840,7 @@ vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei32_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei64.c index de7b96aae0447..3960e8209b607 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf4x2_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tu(vfloat64m2x2_t 
maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, 
size_t vl) { @@ -210,7 +210,7 @@ vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t 
*base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t 
test_vloxseg2ei64_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat16mf2x2_t 
test_vloxseg2ei64_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei64_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei64_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei8.c index f8d01d0fc45ae..57347d70ec189 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg2ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ 
-100,7 +100,7 @@ vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 
@@ vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ 
-300,7 +300,7 @@ vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t 
vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t 
test_vloxseg2ei8_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, 
const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ 
vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t mas // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vloxseg2ei8_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 
2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -1650,7 +1650,7 @@ vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maske
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m1x2_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -1660,7 +1660,7 @@ vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedof
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m2x2_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) {
@@ -1670,7 +1670,7 @@ vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i16m4x2_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) {
@@ -1680,7 +1680,7 @@ vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32mf2x2_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -1690,7 +1690,7 @@ vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maske
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m1x2_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -1700,7 +1700,7 @@ vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedof
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m2x2_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2)
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1710,7 +1710,7 @@ vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1720,7 +1720,7 @@ vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1730,7 +1730,7 @@ vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1740,7 +1740,7 @@ vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1750,7 +1750,7 @@ vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1760,7 +1760,7 @@ vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1770,7 +1770,7 @@ vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1780,7 +1780,7 @@ vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1790,7 +1790,7 @@ vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1800,7 +1800,7 @@ vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1810,7 +1810,7 @@ vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1820,7 +1820,7 @@ vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1830,7 +1830,7 @@ vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1840,7 +1840,7 @@ vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1850,7 +1850,7 @@ vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1860,7 +1860,7 @@ vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1870,7 +1870,7 @@ vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1880,7 +1880,7 @@ vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1890,7 +1890,7 @@ vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1900,7 +1900,7 @@ vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1910,7 +1910,7 @@ vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1920,7 +1920,7 @@ vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vloxseg2ei8_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei16.c index f45747061d73e..01b7d62ec654c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei16_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t 
vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // 
vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tum(vbool8_t 
mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t 
bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t 
test_vloxseg3ei16_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei16_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei16_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei32.c index 90767841fdcb9..0f6e3c3707ba4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t 
test_vloxseg3ei32_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf2x3_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // 
vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m1x3_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", 
, 3) @test_vloxseg3ei32_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei32_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei64.c index 1a7b4bedd2a2e..5a509b71175cb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tu(vfloat32m2x3_t 
maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 
3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tu(vint16m2x3_t 
maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, 
size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t 
test_vloxseg3ei64_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, 
const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x3_t 
test_vloxseg3ei64_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedo // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vloxseg3ei64_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei64_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei8.c index 333e7eee028cb..dcc14036803a5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg3ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ 
-100,7 +100,7 @@ vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ 
-200,7 +200,7 @@ vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ 
-300,7 +300,7 @@ vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t 
test_vloxseg3ei8_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const 
double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t 
test_vloxseg3ei8_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m2x3_tum // CHECK-RV64-SAME: ( 
[[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", 
, 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vloxseg3ei8_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei16.c index 1cdc946d081d0..109f93c150eb6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t 
test_vloxseg4ei16_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf8x4_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // 
vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m1x4_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei16_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t 
test_vloxseg4ei16_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t 
maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ 
-460,7 +460,7 @@ vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tum(vbool32_t mask, 
vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei16_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei16_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei32.c index ba84987fb9cae..14f4f454054f5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, 
size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, 
size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const 
uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // 
vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t 
maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ 
vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t 
masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei32_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei32_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei64.c index f9cf85a37f952..f1c971f4a2921 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, 
size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t 
vl) { @@ -200,7 +200,7 @@ vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t 
bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei64_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei8.c index 4dd1f8708e933..72b0773de0cf2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg4ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vloxseg4ei8_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 
4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vloxseg4ei8_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei16.c index 5172dbf0a2b3f..f40b1c02dfe17 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tu(vint8mf8x5_t 
maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, 
size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // 
vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tum(vbool64_t 
mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, 
size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tum(vbool8_t mask, 
vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf8x5_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei16_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei32.c index 5a4ebcb8f9e93..8565c80a26b7e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tu(vint8mf4x5_t 
maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t 
test_vloxseg5ei32_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf2x5_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei32_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei64.c index 8acc1c3670d3a..eddda795a27bb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vloxseg5ei64_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tu(vuint8mf4x5_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // 
vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t 
maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ 
vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t 
mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vloxseg5ei64_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei64_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei8.c index a54ef5a6d86ad..359bf4bbf299f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg5ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 
@@ vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ 
-200,7 +200,7 @@ vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t 
test_vloxseg5ei8_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t 
test_vloxseg5ei8_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf2x5_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", 
, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", 
, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t 
test_vloxseg5ei8_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vloxseg5ei8_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei16.c index b7afe98d2c6bb..1b779ac9f5e59 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, 
vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const 
int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t 
test_vloxseg6ei16_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vloxseg6ei16_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei16_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei32.c index 24216a4b54b10..598b68d775814 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tu(vint8mf4x6_t 
maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t 
test_vloxseg6ei32_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf2x6_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei32_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei64.c index 075e498b97e2f..884468562aed9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vloxseg6ei64_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tu(vuint8mf4x6_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // 
vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t 
maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ 
vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t 
mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vloxseg6ei64_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei64_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei8.c index 123cbe6496696..1b0ed30ea445f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg6ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 
@@ vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ 
-200,7 +200,7 @@ vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t 
test_vloxseg6ei8_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t 
test_vloxseg6ei8_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf2x6_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", 
, 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", 
, 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t 
test_vloxseg6ei8_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vloxseg6ei8_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei16.c index b35e90251d0c3..f619f8ea91fbc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, 
vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const 
int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t 
test_vloxseg7ei16_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vloxseg7ei16_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei16_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei32.c index 3e4ef02fc1f78..854cb25da58f0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tu(vint8mf4x7_t 
maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t 
test_vloxseg7ei32_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf2x7_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei32_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei64.c index 8e978cfd02719..367afe9d88f9e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vloxseg7ei64_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tu(vuint8mf4x7_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // 
vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t 
maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ 
vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t 
mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vloxseg7ei64_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei64_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei8.c index 21c84239ffd6c..058d9bbc72b72 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg7ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 
@@ vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ 
-200,7 +200,7 @@ vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t 
test_vloxseg7ei8_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t 
test_vloxseg7ei8_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf2x7_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", 
, 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", 
, 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t 
test_vloxseg7ei8_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vloxseg7ei8_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei16.c index 28259081f8c0f..8daad223e1691 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, 
vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const 
int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t 
test_vloxseg8ei16_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vloxseg8ei16_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 
8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei16_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei32.c index c29b2f767a551..e798d230d8a9a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tu(vint8mf4x8_t 
maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t 
test_vloxseg8ei32_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf2x8_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei32_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei64.c index cf79fb0e8d961..1a784ba93af92 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vloxseg8ei64_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tu(vuint8mf4x8_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // 
vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t 
maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ 
vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t 
mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vloxseg8ei64_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei64_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei8.c index 37d390ba25aed..a99d06a606809 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vloxseg8ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 
@@ vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ 
-200,7 +200,7 @@ vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t 
test_vloxseg8ei8_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, 
vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t 
test_vloxseg8ei8_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf2x8_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", 
, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", 
, 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t 
test_vloxseg8ei8_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vloxseg8ei8_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e16ff.c index a08e624f5de06..099f6de35bc9c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const i // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vlseg2e16ff_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vfloat16mf4x2_t 
test_vlseg2e16ff_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -478,7 +478,7 @@ vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } 
[[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -491,7 +491,7 @@ vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -504,7 +504,7 @@ vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -517,7 +517,7 @@ vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -530,7 +530,7 @@ vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -543,7 +543,7 @@ vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -556,7 +556,7 @@ vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), 
i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -569,7 +569,7 @@ vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -582,7 +582,7 @@ vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -595,7 +595,7 @@ vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -608,7 +608,7 @@ vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -621,7 +621,7 @@ vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -634,7 +634,7 @@ vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], 
i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -647,7 +647,7 @@ vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_f16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -660,7 +660,7 @@ vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -673,7 +673,7 @@ vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 
2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -686,7 +686,7 @@ vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -699,7 +699,7 @@ vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -712,7 +712,7 @@ vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_i16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -725,7 +725,7 @@ vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -738,7 +738,7 @@ vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -751,7 +751,7 @@ vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -764,7 +764,7 @@ vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -777,7 +777,7 @@ vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e16ff_v_u16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e32ff.c index 7f5b21eba8583..4bdfcf3d59bfd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", 
, 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -478,7 +478,7 @@ vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -491,7 +491,7 @@ vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -504,7 +504,7 @@ vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -517,7 +517,7 @@ vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr 
noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -530,7 +530,7 @@ vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -543,7 +543,7 @@ vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -556,7 +556,7 @@ vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -569,7 +569,7 @@ vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -582,7 +582,7 @@ vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -595,7 +595,7 @@ vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m1x2_mu // CHECK-RV64-SAME: ( 
[[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -608,7 +608,7 @@ vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -621,7 +621,7 @@ vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e32ff_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e64ff.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e64ff.c index e21b596ee03c4..5729254e5493d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const i // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m1x2_tum // CHECK-RV64-SAME: 
( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vlseg2e64ff_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maske // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vfloat64m4x2_t 
test_vlseg2e64ff_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } 
[[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } 
[[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e64ff_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e8ff.c index f4591392f15b3..4377ce7f64b6d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg2e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uin // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint8mf2x2_t 
test_vlseg2e8ff_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -478,7 +478,7 @@ vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -491,7 +491,7 @@ vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -504,7 +504,7 @@ vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } 
@llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -517,7 +517,7 @@ vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -530,7 +530,7 @@ vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -543,7 +543,7 @@ vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_i8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -556,7 +556,7 @@ vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.i64.nxv1i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -569,7 +569,7 @@ vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv2i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -582,7 +582,7 @@ vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv4i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -595,7 +595,7 @@ vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv8i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -608,7 +608,7 @@ vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv16i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -621,7 +621,7 @@ vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vlseg2e8ff_v_u8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv32i1(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 2), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e16ff.c index c82c81744616f..1d673a456c309 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 
3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], 
i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", 
, 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 
[[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 
3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -478,7 +478,7 @@ vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -491,7 +491,7 @@ vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -504,7 +504,7 @@ vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -517,7 +517,7 @@ vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -530,7 +530,7 @@ vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -543,7 +543,7 @@ vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -556,7 +556,7 @@ vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -569,7 +569,7 @@ vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -582,7 +582,7 @@ vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -595,7 +595,7 @@ vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -608,7 +608,7 @@ vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -621,7 +621,7 @@ vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e16ff_v_u16m2x3_mu // CHECK-RV64-SAME: ( 
[[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e32ff.c index 42a4aa325de6e..8c8c24294310c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], 
align 8 @@ -36,7 +36,7 @@ vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vlseg3e32ff_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vlseg3e32ff_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint32m1x3_t 
test_vlseg3e32ff_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 
1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e32ff_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e64ff.c index e139dacba5aa0..aecfc5445659d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 
1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e64ff_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e8ff.c index 84ff42ce5a04f..3fc5c4bdbb2e0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg3e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } 
@llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_i8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.i64.nxv1i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -478,7 +478,7 @@ vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv2i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -491,7 +491,7 @@ vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv4i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -504,7 +504,7 @@ vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv8i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -517,7 +517,7 @@ vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vlseg3e8ff_v_u8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv16i1(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 3), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e16ff.c index 
e957dd7705f4a..1250ab6ebb2c4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x4_t 
test_vlseg4e16ff_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m1x4_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m2x4_tumu // CHECK-RV64-SAME: 
( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vlseg4e16ff_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -478,7 +478,7 @@ vuint16m2x4_t 
test_vlseg4e16ff_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -491,7 +491,7 @@ vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -504,7 +504,7 @@ vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -517,7 +517,7 @@ vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -530,7 +530,7 @@ vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -543,7 +543,7 @@ vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 
1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -556,7 +556,7 @@ vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -569,7 +569,7 @@ vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -582,7 +582,7 @@ vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -595,7 +595,7 @@ vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -608,7 +608,7 @@ vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -621,7 +621,7 @@ vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e16ff_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e32ff.c index 173b8df4a9396..4dc3cfd4d1880 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ 
vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 
[[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), 
i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), 
i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] 
= extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 
[[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e32ff_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e64ff.c index cc720676a8e8e..f4ed85abeba0e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 
4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e64ff_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e8ff.c index d16934af48a0c..d3a9b995bcbf1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg4e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const in // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", 
, 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } 
@llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vlseg4e8ff_v_u8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_i8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t 
maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.i64.nxv1i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -478,7 +478,7 @@ vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.i64.nxv2i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -491,7 +491,7 @@ vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.i64.nxv4i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -504,7 +504,7 @@ vuint8mf2x4_t 
test_vlseg4e8ff_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.i64.nxv8i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -517,7 +517,7 @@ vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vlseg4e8ff_v_u8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 4), i64 } @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.i64.nxv16i1(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 4), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e16ff.c index 99969b08168b7..0a520d75d5cfa 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 
4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x5_t 
test_vlseg5e16ff_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tum(vbool64_t mask, 
vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint16mf4x5_t 
test_vlseg5e16ff_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } 
[[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 
} [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 
1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e16ff_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e32ff.c index fa7c0bb1c6115..f9c0f7f153b39 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } 
@llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } 
@llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], 
i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } 
@llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e32ff_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e64ff.c index 4d2e08e5dfc25..9bc778dfd3a73 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e64ff.c 
@@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vlseg5e64ff_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e64ff_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e8ff.c index 3494e012e929c..8bbfe7d88fe18 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg5e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x5_t 
test_vlseg5e8ff_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedof // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t 
maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vuint8mf2x5_t 
test_vlseg5e8ff_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.i64.nxv1i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.i64.nxv2i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.i64.nxv4i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vlseg5e8ff_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv8i1(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 5), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e16ff.c index 5e96a0d99a123..51af97a8d2dee 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] 
= extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } 
[[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], 
i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 
1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e16ff_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e32ff.c index 4aaca57cdff92..76e8661dfe5d9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32mf2x6_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_i32m1x6_mu // CHECK-RV64-SAME: ( 
[[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e32ff_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e64ff.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e64ff.c index 3e711a6cbc32f..8bc6d3f699425 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e64ff_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e8ff.c index 572acf6f73f1a..abf1d50c7ac8f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg6e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } 
[[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue 
{ target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } 
@llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.i64.nxv1i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.i64.nxv2i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.i64.nxv4i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vlseg6e8ff_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 6), i64 } @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv8i1(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 6), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e16ff.c index b1962ca6136f9..472a9c342ec44 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } 
@llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], 
i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } 
@llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e16ff_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e32ff.c index 05eadd643bedf..74397d53c1731 100644 
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x7_t 
test_vlseg7e32ff_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32mf2x7_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tum(vbool32_t mask, 
vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ 
vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 
[[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 
} [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e32ff_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e64ff.c index a4071aca53b19..5a08a7c94dc05 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } 
@llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e64ff_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e8ff.c index cf44ba1ced5f4..1aa853e9738bb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg7e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } 
@llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 
7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } 
@llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } 
@llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.i64.nxv1i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.i64.nxv2i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.i64.nxv4i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vlseg7e8ff_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 7), i64 } @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.i64.nxv8i1(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 7), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e16ff.c index a8629007726bc..24e50a9007538 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e16ff.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e16ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 
8) @test_vlseg8e16ff_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } 
@llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef 
[[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf4x8_tumu // CHECK-RV64-SAME: 
( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vlseg8e16ff_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -400,7 +400,7 @@ vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mask // CHECK-RV64-LABEL: define 
dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -413,7 +413,7 @@ vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -426,7 +426,7 @@ vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -439,7 +439,7 @@ vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_mu(vbool16_t mask, 
vint16m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -452,7 +452,7 @@ vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -465,7 +465,7 @@ vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e16ff_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e32ff.c index 49550b8e08be5..1651976c22487 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e32ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint32m1x8_t 
test_vlseg8e32ff_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 
1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e32ff_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 5) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e64ff.c index b92ca837fe513..6d091682ca2b9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e64ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 
8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e64ff_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e8ff.c index b360e4524e5df..2e0f7edd67d41 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlseg8e8ff.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -23,7 +23,7 @@ vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -36,7 +36,7 @@ vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -49,7 +49,7 @@ vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const in // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -62,7 +62,7 @@ vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -75,7 +75,7 @@ vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -88,7 +88,7 @@ vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -101,7 +101,7 @@ vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -114,7 +114,7 @@ vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uin // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } 
@llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -127,7 +127,7 @@ vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -140,7 +140,7 @@ vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -153,7 +153,7 @@ vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -166,7 +166,7 @@ vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -179,7 +179,7 @@ vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -192,7 +192,7 @@ vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -205,7 +205,7 @@ vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -218,7 +218,7 @@ vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -231,7 +231,7 @@ vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -244,7 +244,7 @@ vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -257,7 +257,7 @@ vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -270,7 +270,7 @@ vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } 
@llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -283,7 +283,7 @@ vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -296,7 +296,7 @@ vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -309,7 +309,7 @@ vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -322,7 +322,7 @@ vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -335,7 +335,7 @@ vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -348,7 +348,7 @@ vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -361,7 +361,7 @@ vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -374,7 +374,7 @@ vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tup // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.i64.nxv1i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8 @@ -387,7 +387,7 @@ vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.i64.nxv2i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -400,7 +400,7 @@ vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedo
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8mf2x8_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.i64.nxv4i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
@@ -413,7 +413,7 @@ vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedo
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vlseg8e8ff_v_u8m1x8_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", , 8), i64 } @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.i64.nxv8i1(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[MASK]], i64 [[VL]], i64 1, i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", , 8), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei16.c
index bb8b45d7189fd..c966c645d640d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei16.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf4x2_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf2x2_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
@@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m1x2_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]]
//
vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
@@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, co
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m2x2_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]],
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tu(vfloat64m1x2_t 
maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t 
test_vluxseg2ei16_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32mf2x2_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei16_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t 
test_vluxseg2ei16_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint16m2_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint16m1_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1650,7 +1650,7 @@ vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1660,7 +1660,7 @@ vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1670,7 +1670,7 @@ vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1680,7 +1680,7 @@ vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1690,7 +1690,7 @@ vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1700,7 +1700,7 @@ vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1710,7 +1710,7 @@ vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1720,7 +1720,7 @@ vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1730,7 +1730,7 @@ vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1740,7 +1740,7 @@ vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -1750,7 +1750,7 @@ vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1760,7 +1760,7 @@ vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1770,7 +1770,7 @@ vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1780,7 +1780,7 @@ vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1790,7 +1790,7 @@ vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1800,7 +1800,7 @@ vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1810,7 +1810,7 @@ vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1820,7 +1820,7 @@ vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1830,7 +1830,7 @@ vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1840,7 +1840,7 @@ vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1850,7 +1850,7 @@ vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1860,7 +1860,7 @@ vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1870,7 +1870,7 @@ vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1880,7 +1880,7 @@ vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1890,7 +1890,7 @@ vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1900,7 +1900,7 @@ vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1910,7 +1910,7 @@ vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1920,7 +1920,7 @@ vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei16_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei32.c index dd2930586dfee..2f1de2e2bcd1f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei32_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, con // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t 
vl) { @@ -290,7 +290,7 @@ vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tu(vuint16m2x2_t 
maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint32m4_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint32m2_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1650,7 +1650,7 @@ vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1660,7 +1660,7 @@ vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1670,7 +1670,7 @@ vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -1680,7 +1680,7 @@ vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1690,7 +1690,7 @@ vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1700,7 +1700,7 @@ vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1710,7 +1710,7 @@ vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1720,7 +1720,7 @@ vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1730,7 +1730,7 @@ vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1740,7 +1740,7 @@ vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1750,7 +1750,7 @@ vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1760,7 +1760,7 @@ vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1770,7 +1770,7 @@ vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1780,7 +1780,7 @@ vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1790,7 +1790,7 @@ vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1800,7 +1800,7 @@ vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1810,7 +1810,7 @@ vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1820,7 +1820,7 @@ vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1830,7 +1830,7 @@ vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1840,7 +1840,7 @@ vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei32_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei64.c index 60421bdbe9c22..6fc021d374d18 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei64_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t 
vl) { @@ -290,7 +290,7 @@ vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, 
const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", 
, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint64m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint64m4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei64_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei8.c index f912905043c05..efe58ac87a6a4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg2ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf4x2_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 
2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m2x2_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // 
vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m1x2_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t 
test_vluxseg2ei8_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf8x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", 
, 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t 
test_vluxseg2ei8_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m4x2_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32mf2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m1x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m2x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 
2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m4x2_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ 
vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t mask 
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) 
@test_vluxseg2ei8_v_f32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf8x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32mf2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m1x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m2x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m4x2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf8x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32mf2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m1x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m2x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m4x2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1490,7 +1490,7 @@ vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -1500,7 +1500,7 @@ vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1510,7 +1510,7 @@ vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1520,7 +1520,7 @@ vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1530,7 +1530,7 @@ vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, vuint8m1_t bindex, size_t vl) { @@ -1540,7 +1540,7 @@ vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1550,7 +1550,7 @@ vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1560,7 +1560,7 @@ vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_f64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -1570,7 +1570,7 @@ vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1580,7 +1580,7 @@ vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1590,7 +1590,7 @@ vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1600,7 +1600,7 @@ vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1610,7 +1610,7 @@ vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1620,7 +1620,7 @@ vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 
2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1630,7 +1630,7 @@ vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1640,7 +1640,7 @@ vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1650,7 +1650,7 @@ vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1660,7 +1660,7 @@ vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1670,7 +1670,7 @@ vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1680,7 +1680,7 @@ vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1690,7 +1690,7 @@ vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1700,7 +1700,7 @@ vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1710,7 +1710,7 @@ vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1720,7 +1720,7 @@ vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1730,7 +1730,7 @@ vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1740,7 +1740,7 @@ vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_i64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1750,7 +1750,7 @@ vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf8x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1760,7 +1760,7 @@ vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1770,7 +1770,7 @@ vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1780,7 +1780,7 @@ vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1790,7 +1790,7 @@ vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1800,7 +1800,7 @@ vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u8m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1810,7 +1810,7 @@ vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1820,7 +1820,7 @@ vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1830,7 +1830,7 @@ vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1840,7 +1840,7 @@ vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1850,7 +1850,7 @@ vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u16m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1860,7 +1860,7 @@ vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32mf2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1870,7 +1870,7 @@ vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1880,7 +1880,7 @@ vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1890,7 +1890,7 @@ vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u32m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1900,7 +1900,7 @@ vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m1x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1910,7 +1910,7 @@ vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m2x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1920,7 +1920,7 @@ vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 2) @test_vluxseg2ei8_v_u64m4x2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 2) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 2) [[TMP0]] // vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei16.c index 56c5054595c1c..ffb5937ce2f8b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t 
test_vluxseg3ei16_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf8x3_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // 
vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m1x3_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei16_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16.i64(target("riscv.vector.tuple", , 3) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t 
test_vluxseg3ei16_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t 
maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ 
-460,7 +460,7 @@ vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tum(vbool32_t mask, 
vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei16_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei16_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei32.c index 0a76f7f967859..de568014da05c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, 
size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, 
size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const 
uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // 
vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t 
maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ 
vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t 
masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei32_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei32_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei64.c index 3a74ce27599f6..abce8ff9c67d1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, 
size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t 
vl) { @@ -200,7 +200,7 @@ vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t 
bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei64_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei8.c index 1ab3fdbaf275d..25da79e2d914d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg3ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf8x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf4x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32mf2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m1x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m2x3_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) 
@test_vluxseg3ei8_v_f16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf8x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf4x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32mf2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m1x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m2x3_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf8x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf4x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32mf2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m1x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m2x3_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_f64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 
3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_i64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf8x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u8m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf4x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u16m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32mf2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u32m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) 
@llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m1x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 3) @test_vluxseg3ei8_v_u64m2x3_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 3) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 3) [[TMP0]] // vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei16.c index f875e26bac946..0d83740e23b7d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tu(vfloat32m2x4_t 
maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", 
, 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tu(vint16m1x4_t 
maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, 
size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, 
const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ 
-410,7 +410,7 @@ vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t 
test_vluxseg4ei16_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t ma // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei16_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint16m1_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei16_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei32.c index 7da0eb0e988a2..065ecabfad61d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf4x4_tu // 
CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tu(vint8mf8x4_t 
maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t 
vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t 
bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint32m2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint32m1_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei32_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei64.c index 969022df85ac7..fff24d86fca6e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tu(vfloat32m1x4_t 
maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tu(vint16m1x4_t 
maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t 
test_vluxseg4ei64_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m2x4_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t 
test_vluxseg4ei64_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, 
const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8m1x4_t 
test_vluxseg4ei64_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t masked // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) 
@test_vluxseg4ei64_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint64m8_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint64m4_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint64m2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei64_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei8.c index b03f80b32bc1e..8ab0ac946814b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg4ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, 
vuint8mf8_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ 
-190,7 +190,7 @@ vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf8x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -290,7 
+290,7 @@ vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf4x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32mf2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m1x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m2x4_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t 
test_vluxseg4ei8_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, 
const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ 
vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedo // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m2x4_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf8x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf4x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32mf2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m1x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m2x4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf8x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf4x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1050,7 +1050,7 @@ vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1060,7 +1060,7 @@ vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1070,7 +1070,7 @@ vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32mf2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1080,7 +1080,7 @@ vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1090,7 +1090,7 @@ vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1100,7 +1100,7 @@ vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m1x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1110,7 +1110,7 @@ vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m2x4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1120,7 +1120,7 @@ vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1200,7 +1200,7 @@ vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_f64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1210,7 +1210,7 @@ vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1220,7 +1220,7 @@ vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1230,7 +1230,7 @@ vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1240,7 +1240,7 @@ vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1250,7 +1250,7 @@ vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1260,7 +1260,7 @@ vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 
4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1270,7 +1270,7 @@ vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1280,7 +1280,7 @@ vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1290,7 +1290,7 @@ vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1300,7 +1300,7 @@ vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1310,7 +1310,7 @@ vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1320,7 +1320,7 @@ vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1330,7 +1330,7 @@ vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1340,7 +1340,7 @@ vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_i64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1350,7 +1350,7 @@ vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf8x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1360,7 +1360,7 @@ vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1370,7 +1370,7 @@ vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1380,7 +1380,7 @@ vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1390,7 +1390,7 @@ vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u8m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1400,7 +1400,7 @@ vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf4x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1410,7 +1410,7 @@ vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1420,7 +1420,7 @@ vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1430,7 +1430,7 @@ vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u16m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1440,7 +1440,7 @@ vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32mf2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1450,7 +1450,7 @@ vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1460,7 +1460,7 @@ vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u32m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1470,7 +1470,7 @@ vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m1x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1480,7 +1480,7 @@ vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 4) @test_vluxseg4ei8_v_u64m2x4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 4) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 4) [[TMP0]] // vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei16.c index 92e496ddcab8e..b9fca136e0e83 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vluxseg5ei16_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tu(vuint8mf4x5_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] 
// vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t 
maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 
@@ vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tum(vbool64_t mask, 
vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf4x5_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei16_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei32.c index c7198bda16f31..294cef4f0d7df 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ 
-100,7 +100,7 @@ vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, 
size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei32_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei64.c index 7f11665a1f84b..cf86aea703d50 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tu(vfloat64m1x5_t 
maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tu(vint64m1x5_t 
maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t 
test_vluxseg5ei64_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, 
const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ 
vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t masked // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) 
@test_vluxseg5ei64_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei64_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei8.c index c6f57b747e378..919a2fa1b5798 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg5ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf4x5_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) 
[[TMP0]] // vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf4x5_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf8x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t 
test_vluxseg5ei8_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf4x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", 
, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32mf2x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u64m1x5_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t 
test_vluxseg5ei8_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf8x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t 
*base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x5_t 
test_vluxseg5ei8_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedo // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf8x5_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf4x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32mf2x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u64m1x5_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf8x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf4x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32mf2x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u64m1x5_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_f64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_i64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf8x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u8m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf4x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u16m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32mf2x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u32m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) 
@llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 5) @test_vluxseg5ei8_v_u64m1x5_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 5) [[TMP0]] // vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei16.c index 53d5516cc8f05..2afaccc5db75d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vluxseg6ei16_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tu(vuint8mf4x6_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] 
// vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t 
maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 
@@ vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tum(vbool64_t mask, 
vint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf4x6_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei16_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei32.c index 753d83d7f702a..49ba90c061bcc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ 
-100,7 +100,7 @@ vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, 
size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei32_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei64.c index 7eee29cbb0464..b74f589814fe4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tu(vfloat64m1x6_t 
maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tu(vint64m1x6_t 
maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t 
test_vluxseg6ei64_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, 
const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ 
vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t masked // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) 
@test_vluxseg6ei64_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei64_v_u64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei8.c index 6d4e481861e49..cc6fcf629aa4f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg6ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf4x6_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) 
[[TMP0]] // vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf4x6_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf8x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t 
test_vluxseg6ei8_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf4x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", 
, 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32mf2x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u64m1x6_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t 
test_vluxseg6ei8_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf8x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t 
*base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x6_t 
test_vluxseg6ei8_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedo // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf8x6_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf4x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32mf2x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u64m1x6_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf8x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf4x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32mf2x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u64m1x6_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_f64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_i64m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf8x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u8m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf4x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u16m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32mf2x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]] // vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u32m1x6_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) 
@llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]]
//
vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -1040,7 +1040,7 @@ vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t masked
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 6) @test_vluxseg6ei8_v_u64m1x6_mu
// CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 6) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 6) [[TMP0]]
//
vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei16.c
index 38fd9305e517e..4c877c226313f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei16.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf4x7_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]]
//
vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf2x7_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7)
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vluxseg7ei16_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tu(vuint8mf4x7_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] 
// vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t 
maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 
@@ vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tum(vbool64_t mask, 
vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf4x7_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei16_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei32.c index 3496075dba3ee..dff7701a0a9ce 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ 
-100,7 +100,7 @@ vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, 
size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei32_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei64.c index e6ab589ffdb4b..0165dde122e1a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tu(vfloat64m1x7_t 
maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tu(vint64m1x7_t 
maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t 
test_vluxseg7ei64_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, 
const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ 
vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t masked // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) 
@test_vluxseg7ei64_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei64_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei8.c index 0234cc1578014..61bf0539266b9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg7ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf4x7_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) 
[[TMP0]] // vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf4x7_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf8x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t 
test_vluxseg7ei8_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf4x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", 
, 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32mf2x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u64m1x7_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t 
test_vluxseg7ei8_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf8x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t 
*base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x7_t 
test_vluxseg7ei8_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedo // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf8x7_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf4x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32mf2x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u64m1x7_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf8x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf4x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32mf2x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u64m1x7_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_f64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_i64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf8x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u8m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf4x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u16m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32mf2x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u32m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) 
@llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 7) @test_vluxseg7ei8_v_u64m1x7_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 7) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 7) [[TMP0]] // vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei16.c index a2443b6036bca..21d0e62b82f34 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei16.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vluxseg8ei16_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tu(vuint8mf4x8_t 
maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // 
CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] 
// vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t 
maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -380,7 +380,7 
@@ vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tum(vbool64_t mask, 
vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local 
target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf4x8_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei16_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei32.c index d2c764ad36a06..a3badb84fd9d9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei32.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ 
-100,7 +100,7 @@ vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, 
size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint32m1_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei32_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei64.c index af35e8ac5dd06..a70242d4ab5e2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei64.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tu(vfloat64m1x8_t 
maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tu(vint64m1x8_t 
maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const u // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, c // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t 
test_vluxseg8ei64_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, 
const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -330,7 +330,7 @@ 
vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t masked // 
CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) 
@test_vluxseg8ei64_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], 
target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint64m1_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint64m2_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint64m1_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei64_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei8.c index c631245c3c10b..43e8acce98d1b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vluxseg8ei8.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf4x8_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -40,7 +40,7 @@ vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -50,7 +50,7 @@ vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -60,7 +60,7 @@ vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -70,7 +70,7 @@ vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, con // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) 
[[TMP0]] // vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -80,7 +80,7 @@ vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -90,7 +90,7 @@ vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -100,7 +100,7 @@ vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -110,7 +110,7 @@ vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8 // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf4x8_tu // CHECK-RV64-SAME: 
(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -120,7 +120,7 @@ vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -130,7 +130,7 @@ vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -140,7 +140,7 @@ vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -150,7 +150,7 @@ vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, cons // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -160,7 +160,7 @@ vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -170,7 +170,7 @@ vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const i // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf8x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t 
test_vluxseg8ei8_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -180,7 +180,7 @@ vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -190,7 +190,7 @@ vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -200,7 +200,7 @@ vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -210,7 +210,7 @@ vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const ui // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf4x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", 
, 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -220,7 +220,7 @@ vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -230,7 +230,7 @@ vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -240,7 +240,7 @@ vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32mf2x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -250,7 +250,7 @@ vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, co // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -260,7 +260,7 @@ vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u64m1x8_tu // CHECK-RV64-SAME: (target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], i64 [[VL]], i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -270,7 +270,7 @@ vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret 
target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -280,7 +280,7 @@ vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -290,7 +290,7 @@ vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -300,7 +300,7 @@ vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t 
test_vluxseg8ei8_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -310,7 +310,7 @@ vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -320,7 +320,7 @@ vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -330,7 +330,7 @@ vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf8x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t 
*base, vuint8mf8_t bindex, size_t vl) { @@ -340,7 +340,7 @@ vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -350,7 +350,7 @@ vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -360,7 +360,7 @@ vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -370,7 +370,7 @@ vint8m1x8_t 
test_vluxseg8ei8_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -380,7 +380,7 @@ vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -390,7 +390,7 @@ vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -400,7 +400,7 @@ vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedo // CHECK-RV64-LABEL: 
define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -410,7 +410,7 @@ vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -420,7 +420,7 @@ vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -430,7 +430,7 @@ vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedo // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf8x8_tum // 
CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -440,7 +440,7 @@ vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -450,7 +450,7 @@ vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -460,7 +460,7 @@ vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -470,7 +470,7 @@ vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf4x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -480,7 +480,7 @@ vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -490,7 +490,7 @@ vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -500,7 +500,7 @@ vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32mf2x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -510,7 +510,7 @@ vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -520,7 +520,7 @@ vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u64m1x8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 2, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -530,7 +530,7 @@ vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -540,7 +540,7 @@ vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -550,7 +550,7 @@ vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -560,7 +560,7 @@ vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -570,7 +570,7 @@ vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -580,7 +580,7 @@ vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t ma // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -600,7 +600,7 @@ vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -610,7 +610,7 @@ vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -620,7 +620,7 @@ vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -630,7 +630,7 @@ vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -640,7 +640,7 @@ vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -650,7 +650,7 @@ vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -660,7 +660,7 @@ vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -670,7 +670,7 @@ vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -680,7 +680,7 @@ vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -690,7 +690,7 @@ vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf8x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -700,7 +700,7 @@ vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -710,7 +710,7 @@ vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -720,7 +720,7 @@ vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -730,7 +730,7 @@ vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf4x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -740,7 +740,7 @@ vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -750,7 +750,7 @@ vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -760,7 +760,7 @@ vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32mf2x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -770,7 +770,7 @@ vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -780,7 +780,7 @@ vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u64m1x8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 0, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -790,7 +790,7 @@ vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -800,7 +800,7 @@ vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -810,7 +810,7 @@ vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -820,7 +820,7 @@ vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -830,7 +830,7 @@ vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t m // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -840,7 +840,7 @@ vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_f64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t mask // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -860,7 +860,7 @@ vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -870,7 +870,7 @@ vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -880,7 +880,7 @@ vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) 
[[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -890,7 +890,7 @@ vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tu // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -900,7 +900,7 @@ vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -910,7 +910,7 @@ vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -920,7 +920,7 @@ vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -930,7 +930,7 @@ vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maske // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -940,7 +940,7 @@ vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_i64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -950,7 +950,7 @@ vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedof // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf8x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -960,7 +960,7 @@ vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -970,7 +970,7 @@ vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -980,7 +980,7 @@ vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u8m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 3) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -990,7 +990,7 @@ vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_ // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf4x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1000,7 +1000,7 @@ vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1010,7 +1010,7 @@ vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u16m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 4) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1020,7 +1020,7 @@ vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32mf2x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1030,7 +1030,7 @@ vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t mas // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u32m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) 
@llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 5) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1040,7 +1040,7 @@ vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t masked // CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", , 8) @test_vluxseg8ei8_v_u64m1x8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", , 8) [[MASKEDOFF_TUPLE]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]], i64 1, i64 6) // CHECK-RV64-NEXT: ret target("riscv.vector.tuple", , 8) [[TMP0]] // vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp index de03aadefdd11..50f161fd38ce6 100644 --- a/clang/utils/TableGen/RISCVVEmitter.cpp +++ b/clang/utils/TableGen/RISCVVEmitter.cpp @@ -167,10 +167,36 @@ static VectorTypeModifier getTupleVTM(unsigned NF) { static_cast(VectorTypeModifier::Tuple2) + (NF - 2)); } +static unsigned getIndexedLoadStorePtrIdx(const RVVIntrinsic *RVVI) { + // We need a special rule for segment load/store since the data width is not + // encoded in the intrinsic name itself. + const StringRef IRName = RVVI->getIRName(); + constexpr unsigned RVV_VTA = 0x1; + constexpr unsigned RVV_VMA = 0x2; + + if (IRName.starts_with("vloxseg") || IRName.starts_with("vluxseg")) { + bool NoPassthru = + (RVVI->isMasked() && (RVVI->getPolicyAttrsBits() & RVV_VTA) && + (RVVI->getPolicyAttrsBits() & RVV_VMA)) || + (!RVVI->isMasked() && (RVVI->getPolicyAttrsBits() & RVV_VTA)); + return RVVI->isMasked() ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1; + } + if (IRName.starts_with("vsoxseg") || IRName.starts_with("vsuxseg")) + return RVVI->isMasked() ? 1 : 0; + + return (unsigned)-1; +} + // This function is used to get the log2SEW of each segment load/store, this // prevent to add a member to RVVIntrinsic. static unsigned getSegInstLog2SEW(StringRef InstName) { // clang-format off + // We need a special rule for indexed segment load/store since the data width + // is not encoded in the intrinsic name itself.
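Editorial aside, not part of the patch: the nested ternary in getIndexedLoadStorePtrIdx above packs the whole argument-layout decision into one expression. A minimal standalone sketch of that decision, with an assumed helper name and simplified inputs rather than the emitter's real API:

unsigned indexedSegLoadPtrIdx(bool Masked, bool NoPassthru) {
  // Masked forms take the mask first, and forms that keep a passthru tuple
  // place it before the pointer, pushing the pointer one slot to the right.
  if (Masked)
    return NoPassthru ? 1 : 2; // (mask, base, ...) vs (mask, maskedoff_tuple, base, ...)
  return NoPassthru ? 0 : 1;   // (base, ...) vs (passthru_tuple, base, ...)
}

For the masked _mu tests above, which still pass a maskedoff tuple, this yields 2, i.e. base is the third builtin argument; indexed segment stores carry no passthru, hence the simpler isMasked() ? 1 : 0 branch.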
+ if (InstName.starts_with("vloxseg") || InstName.starts_with("vluxseg") || + InstName.starts_with("vsoxseg") || InstName.starts_with("vsuxseg")) + return (unsigned)-1; + #define KEY_VAL(KEY, VAL) {#KEY, VAL} #define KEY_VAL_ALL_W_POLICY(KEY, VAL) \ KEY_VAL(KEY, VAL), \ @@ -179,20 +205,20 @@ static unsigned getSegInstLog2SEW(StringRef InstName) { KEY_VAL(KEY ## _tumu, VAL), \ KEY_VAL(KEY ## _mu, VAL) -#define KEY_VAL_ALL_NF_BASE(MACRO_NAME, NAME, SEW, LOG2SEW, SUFFIX) \ - MACRO_NAME(NAME ## 2e ## SEW, LOG2SEW), \ - MACRO_NAME(NAME ## 3e ## SEW, LOG2SEW), \ - MACRO_NAME(NAME ## 4e ## SEW, LOG2SEW), \ - MACRO_NAME(NAME ## 5e ## SEW, LOG2SEW), \ - MACRO_NAME(NAME ## 6e ## SEW, LOG2SEW), \ - MACRO_NAME(NAME ## 7e ## SEW, LOG2SEW), \ - MACRO_NAME(NAME ## 8e ## SEW, LOG2SEW) +#define KEY_VAL_ALL_NF_BASE(MACRO_NAME, NAME, SEW, LOG2SEW, FF) \ + MACRO_NAME(NAME ## 2e ## SEW ## FF, LOG2SEW), \ + MACRO_NAME(NAME ## 3e ## SEW ## FF, LOG2SEW), \ + MACRO_NAME(NAME ## 4e ## SEW ## FF, LOG2SEW), \ + MACRO_NAME(NAME ## 5e ## SEW ## FF, LOG2SEW), \ + MACRO_NAME(NAME ## 6e ## SEW ## FF, LOG2SEW), \ + MACRO_NAME(NAME ## 7e ## SEW ## FF, LOG2SEW), \ + MACRO_NAME(NAME ## 8e ## SEW ## FF, LOG2SEW) #define KEY_VAL_ALL_NF(NAME, SEW, LOG2SEW) \ KEY_VAL_ALL_NF_BASE(KEY_VAL_ALL_W_POLICY, NAME, SEW, LOG2SEW,) #define KEY_VAL_FF_ALL_NF(NAME, SEW, LOG2SEW) \ - KEY_VAL_ALL_NF_BASE(KEY_VAL_ALL_W_POLICY, NAME, SEW, LOG2SEW, _FF) + KEY_VAL_ALL_NF_BASE(KEY_VAL_ALL_W_POLICY, NAME, SEW, LOG2SEW, ff) #define KEY_VAL_ALL_NF_SEW_BASE(MACRO_NAME, NAME) \ MACRO_NAME(NAME, 8, 3), \ @@ -208,11 +234,9 @@ static unsigned getSegInstLog2SEW(StringRef InstName) { // clang-format on static StringMap SegInsts = { - KEY_VAL_ALL_NF_SEW(vlseg), KEY_VAL_FF_ALL_NF_SEW(vlseg), - KEY_VAL_ALL_NF_SEW(vlsseg), KEY_VAL_ALL_NF_SEW(vloxseg), - KEY_VAL_ALL_NF_SEW(vluxseg), KEY_VAL_ALL_NF_SEW(vsseg), - KEY_VAL_ALL_NF_SEW(vssseg), KEY_VAL_ALL_NF_SEW(vsoxseg), - KEY_VAL_ALL_NF_SEW(vsuxseg)}; + KEY_VAL_ALL_NF_SEW(vlseg), KEY_VAL_FF_ALL_NF_SEW(vlseg), + KEY_VAL_ALL_NF_SEW(vlsseg), KEY_VAL_ALL_NF_SEW(vsseg), + KEY_VAL_ALL_NF_SEW(vssseg)}; #undef KEY_VAL_ALL_NF_SEW #undef KEY_VAL_ALL_NF @@ -231,6 +255,14 @@ void emitCodeGenSwitchBody(const RVVIntrinsic *RVVI, raw_ostream &OS) { if (RVVI->hasManualCodegen()) { OS << "IsMasked = " << (RVVI->isMasked() ? "true" : "false") << ";\n"; + + // Skip the non-indexed load/store and compatible header load/store. + OS << "if (SegInstSEW == (unsigned)-1) {\n"; + OS << " auto PointeeType = E->getArg(" << getIndexedLoadStorePtrIdx(RVVI) + << " )->getType()->getPointeeType();\n"; + OS << " SegInstSEW = " + " llvm::Log2_64(getContext().getTypeSize(PointeeType));\n}\n"; + OS << RVVI->getManualCodegen(); OS << "break;\n"; return;
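For clarity, the OS << lines above splice roughly the following guard into each generated segment load/store case. This is a reconstruction of the emitted text, not code taken verbatim from the tree; the argument index 2 shown here is the value getIndexedLoadStorePtrIdx returns for a masked form that still carries a passthru tuple (such as the _mu tests above), and the other forms substitute 0 or 1:

if (SegInstSEW == (unsigned)-1) {
  // Indexed segment load/store: recover the element width from the pointer
  // argument's pointee type instead of the intrinsic name.
  auto PointeeType = E->getArg(2)->getType()->getPointeeType();
  SegInstSEW = llvm::Log2_64(getContext().getTypeSize(PointeeType));
}

So for a const uint64_t *base argument the pointee is 64 bits wide and Log2_64 gives 6, which matches the new trailing i64 6 SEW operand in the u64m1x8 CHECK lines above; the 8-, 16- and 32-bit element tests get 3, 4 and 5 in the same way.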